# deepke/example/ner/standard/conf/train.yaml

data_dir: "data/"
bert_model: "bert-base-chinese" # ["bert-base-chinese", "bert-base-cased"]
language: "cn" # ["cn", "en"]
task_name: "ner"
output_dir: "checkpoint"
max_seq_length: 128 # maximum input length in WordPiece tokens; longer sequences are truncated
do_train: True
do_eval: True
eval_on: "dev" # evaluation split ("dev" or "test")
do_lower_case: True # lowercase the input; set False for cased models such as bert-base-cased
train_batch_size: 32
eval_batch_size: 8
learning_rate: 5e-5
num_train_epochs: 1 # number of training epochs
warmup_proportion: 0.1 # fraction of training steps used for linear learning-rate warmup
weight_decay: 0.01 # weight decay for the AdamW optimizer
adam_epsilon: 1e-8 # epsilon for the AdamW optimizer
max_grad_norm: 1.0 # gradient clipping threshold
no_cuda: False # set True to force training on CPU
local_rank: -1 # -1 disables distributed training
seed: 42
gradient_accumulation_steps: 1 # number of batches to accumulate before each optimizer step
fp16: False # enable mixed-precision training (requires NVIDIA Apex)
fp16_opt_level: "O1" # Apex AMP optimization level: "O0"-"O3" (letter O, not zero-one)
loss_scale: 0.0 # 0 means dynamic loss scaling; a positive power of 2 sets a static scale
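
# Usage sketch (assumptions: the surrounding DeepKE example loads this file with
# Hydra through a run.py entry point, and these keys are exposed at the top level
# of the composed config -- adjust to the actual launcher if it differs):
#   python run.py                        # train with the values above
#   python run.py num_train_epochs=3     # Hydra-style override from the command line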