# log dir
log_dir: /mntcephfs/lab_data/zhiyuanyan/benchmark_results/logs_final/spsl_4frames
# model setting
pretrained: ../weights/xception_best.pth # path to a pre-trained model, if using one
# pretrained: /home/tianshuoge/resnet34-b627a593.pth # path to a pre-trained model, if using one
model_name: spsl # model name
backbone_name: xception # backbone name
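# Illustrative only: a minimal sketch of how a pre-trained checkpoint like the one above
# could be loaded in PyTorch. `build_detector()` is a hypothetical factory, not this repo's code;
# strict=False is an assumption used to tolerate head/channel mismatches.
#   import torch
#   state = torch.load('../weights/xception_best.pth', map_location='cpu')
#   model = build_detector()                      # hypothetical spsl/xception model factory
#   model.load_state_dict(state, strict=False)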
# backbone setting
backbone_config:
  mode: original # shallow_xception
  num_classes: 2
  inc: 4
  dropout: false
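# inc: 4 suggests a 4-channel input (RGB plus a phase-based channel, as in SPSL).
# A minimal numpy sketch of one way a phase-only channel can be computed; this is an
# illustrative assumption, not necessarily the repo's exact preprocessing:
#   import numpy as np
#   def phase_channel(gray):                          # gray: HxW array in [0, 1]
#       spectrum = np.fft.fft2(gray)                  # 2-D DFT
#       phase_only = np.exp(1j * np.angle(spectrum))  # keep phase, drop magnitude
#       return np.real(np.fft.ifft2(phase_only))      # phase-only reconstruction
#   x = np.concatenate([rgb, phase_channel(gray)[..., None]], axis=-1)  # rgb: HxWx3 -> HxWx4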
# dataset
all_dataset: [FaceForensics++, FF-F2F, FF-DF, FF-FS, FF-NT, FaceShifter, DeepFakeDetection, Celeb-DF-v1, Celeb-DF-v2, DFDCP, DFDC, DeeperForensics-1.0, UADFV]
train_dataset: [FF-FS]
test_dataset: [FaceForensics++, FF-F2F, FF-DF, FF-FS, FF-NT]
compression: c23 # compression-level for videos
train_batchSize: 32 # training batch size
test_batchSize: 32 # test batch size
workers: 8 # number of data loading workers
frame_num: {'train': 4, 'test': 32} # number of frames to use per video in training and testing
resolution: 256 # resolution of output image to network
with_mask: false # whether to include mask information in the input
with_landmark: false # whether to include facial landmark information in the input
save_ckpt: true # whether to save checkpoint
save_feat: true # whether to save features
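# The batch size / workers / frame settings above would typically feed a PyTorch DataLoader;
# a sketch assuming a hypothetical DeepfakeDataset class (not this repo's actual loader):
#   from torch.utils.data import DataLoader
#   train_set = DeepfakeDataset(split='train', frame_num=4, resolution=256)  # hypothetical class
#   train_loader = DataLoader(train_set, batch_size=32, shuffle=True, num_workers=8)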
# data augmentation
use_data_augmentation: true # whether to apply data augmentation during training
data_aug:
  flip_prob: 0.5
  rotate_prob: 0.5
  rotate_limit: [-10, 10]
  blur_prob: 0.5
  blur_limit: [3, 7]
  brightness_prob: 0.5
  brightness_limit: [-0.1, 0.1]
  contrast_limit: [-0.1, 0.1]
  quality_lower: 40
  quality_upper: 100
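# One plausible mapping of the parameters above onto albumentations transforms (an assumption
# about the augmentation library; quality_lower/quality_upper match older albumentations releases):
#   import albumentations as A
#   aug = A.Compose([
#       A.HorizontalFlip(p=0.5),
#       A.Rotate(limit=(-10, 10), p=0.5),
#       A.GaussianBlur(blur_limit=(3, 7), p=0.5),
#       A.RandomBrightnessContrast(brightness_limit=(-0.1, 0.1), contrast_limit=(-0.1, 0.1), p=0.5),
#       A.ImageCompression(quality_lower=40, quality_upper=100, p=0.5),
#   ])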
# mean and std for normalization
mean: [0.5, 0.5, 0.5]
std: [0.5, 0.5, 0.5]
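# With mean = std = 0.5 per channel, a [0, 1] input is mapped to [-1, 1]; e.g. with torchvision:
#   from torchvision import transforms
#   normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])  # (x - 0.5) / 0.5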
# optimizer config
optimizer:
  # choose between 'adam' and 'sgd'
  type: adam
  adam:
    lr: 0.0002 # learning rate
    beta1: 0.9 # beta1 for Adam optimizer
    beta2: 0.999 # beta2 for Adam optimizer
    eps: 0.00000001 # epsilon for Adam optimizer
    weight_decay: 0.0005 # weight decay for regularization
    amsgrad: false
  sgd:
    lr: 0.0002 # learning rate
    momentum: 0.9 # momentum for SGD optimizer
    weight_decay: 0.0005 # weight decay for regularization
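# The adam block above maps directly onto torch.optim.Adam; a sketch (model is assumed to exist):
#   import torch
#   opt = torch.optim.Adam(model.parameters(), lr=0.0002, betas=(0.9, 0.999),
#                          eps=1e-8, weight_decay=0.0005, amsgrad=False)
#   # or, if type were 'sgd':
#   # opt = torch.optim.SGD(model.parameters(), lr=0.0002, momentum=0.9, weight_decay=0.0005)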
# training config
lr_scheduler: null # learning rate scheduler
nEpochs: 10 # number of epochs to train for
start_epoch: 0 # manual epoch number (useful for restarts)
save_epoch: 1 # interval epochs for saving models
rec_iter: 100 # interval iterations for recording
logdir: ./logs # folder to output images and logs
manualSeed: 1024 # manual seed for random number generation
save_ckpt: false # whether to save checkpoint (duplicates save_ckpt above; the later value takes effect in PyYAML)
# loss function
loss_func: cross_entropy # loss function to use
losstype: null
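# cross_entropy corresponds to the standard classification loss; e.g. in PyTorch:
#   criterion = torch.nn.CrossEntropyLoss()   # expects raw logits of shape [N, 2] and integer labels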
# metric
metric_scoring: auc # metric for evaluation (auc, acc, eer, ap)
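# AUC can be computed from per-sample scores with scikit-learn, for example:
#   from sklearn.metrics import roc_auc_score
#   auc = roc_auc_score(labels, scores)       # labels in {0, 1}, scores = predicted fake probability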
# cuda
cuda: true # whether to use CUDA acceleration
cudnn: true # whether to use CuDNN for convolution operations
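# A sketch of how manualSeed / cuda / cudnn above are commonly applied in a PyTorch
# training script (illustrative, not this repo's exact code):
#   import random, numpy as np, torch
#   random.seed(1024); np.random.seed(1024); torch.manual_seed(1024)   # manualSeed: 1024
#   torch.backends.cudnn.enabled = True                                # cudnn: true
#   device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # cuda: true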