# deepke/example/re/few-shot/conf/train.yaml
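# Training configuration for DeepKE's few-shot relation-extraction example.
# Most keys mirror pytorch_lightning.Trainer arguments (1.x API); the
# remaining keys (data_*, litmodel_class, lr_2, ptune_k, t_lambda, two_steps,
# use_prompt, ...) are consumed by the example's own training script.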

accelerator: null
accumulate_grad_batches: 1
amp_backend: 'native'
amp_level: 'O2'
auto_lr_find: False
auto_scale_batch_size: False
auto_select_gpus: False
batch_size: 16
benchmark: False
check_val_every_n_epoch: 3
checkpoint_callback: True
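# data_dir follows the common k-shot naming convention: 'k-shot/8-1' is read
# here as 8 examples per relation, split/seed index 1 (an assumption from the
# directory name; this file does not define the scheme).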
data_class: 'REDataset'
data_dir: 'data/k-shot/8-1'
default_root_dir: null
deterministic: False
devices: null
distributed_backend: null
fast_dev_run: False
flush_logs_every_n_steps: 100
gpus: null
gradient_accumulation_steps: 1
gradient_clip_algorithm: 'norm'
gradient_clip_val: 0.0
ipus: null
limit_predict_batches: 1.0
limit_test_batches: 1.0
limit_train_batches: 1.0
limit_val_batches: 1.0
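# BertLitModel wraps BertForMaskedLM: with use_prompt enabled, relations are
# presumably scored from the logits at the [MASK] position (a reading of the
# class names; the file itself does not spell this out).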
litmodel_class: 'BertLitModel'
load_checkpoint: null
log_dir: './model_bert.log'
log_every_n_steps: 50
log_gpu_memory: null
logger: True
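# lr is the main learning rate; lr_2 presumably drives a second parameter
# group or the second optimisation stage used when two_steps is True
# (assumption from the key names).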
lr: 3.0e-5
lr_2: 3.0e-5
max_epochs: 30
max_seq_length: 256
max_steps: null
max_time: null
min_epochs: null
min_steps: null
model_class: 'BertForMaskedLM'
model_name_or_path: 'bert-base-uncased'
move_metrics_to_cpu: False
multiple_trainloader_mode: 'max_size_cycle'
num_nodes: 1
num_processes: 1
num_sanity_val_steps: 2
num_train_epochs: 30
num_workers: 8
optimizer: 'AdamW'
overfit_batches: 0.0
plugins: null
precision: 32
prepare_data_per_node: True
process_position: 0
profiler: null
progress_bar_refresh_rate: null
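# ptune_k: number of learnable prompt (pseudo) tokens, P-tuning style
# (assumption from the key name).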
ptune_k: 7
reload_dataloaders_every_epoch: False
reload_dataloaders_every_n_epochs: 0
replace_sampler_ddp: True
resume_from_checkpoint: null
save_path: './model_bert.pt'
seed: 666
stochastic_weight_avg: False
sync_batchnorm: False
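# t_lambda: weight of an auxiliary loss term added to the main training loss
# (assumption from the key name).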
t_lambda: 0.001
task_name: 'wiki80'
terminate_on_nan: False
tpu_cores: null
track_grad_norm: -1
train_from_saved_model: ''
truncated_bptt_steps: null
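# two_steps: if True, presumably optimise the prompt/new parameters first and
# the full model second, paired with lr_2 above (assumption from the key names).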
two_steps: False
use_prompt: True
val_check_interval: 1.0
wandb: False
weight_decay: 0.01
weights_save_path: null
weights_summary: 'top'
load_path: './model_bert.pt'
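# Note: load_path mirrors save_path, so evaluation reloads the checkpoint
# written by training.
#
# A minimal loading sketch (assumption: the entry script reads this file with
# PyYAML and forwards the Trainer keys; the names below are illustrative, not
# the repo's actual code):
#
#   import yaml
#   import pytorch_lightning as pl
#
#   with open("conf/train.yaml") as f:
#       cfg = yaml.safe_load(f)
#   trainer = pl.Trainer(
#       gpus=cfg["gpus"],
#       max_epochs=cfg["max_epochs"],
#       accumulate_grad_batches=cfg["accumulate_grad_batches"],
#       check_val_every_n_epoch=cfg["check_val_every_n_epoch"],
#   )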