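# RARE text-recognition training config (PaddleOCR-style, inferred from the
# ppocr.* module paths below). Usage sketch, assumed rather than stated by this
# file: the config is typically passed to the trainer's -c flag, with the real
# filename substituted for the placeholder, e.g.
#   python3 tools/train.py -c configs/rec/<this_config>.yml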
Global:
  algorithm: RARE
  use_gpu: true
  epoch_num: 72
  log_smooth_window: 20
  print_batch_step: 10
  save_model_dir: output/rec_RARE
  save_epoch_step: 3
  eval_batch_step: 2000
  train_batch_size_per_card: 256
  test_batch_size_per_card: 256
  image_shape: [3, 32, 100]       # input text image as [channels, height, width]
  max_text_length: 25
  character_type: en
  loss_type: attention
  reader_yml: ./configs/rec/rec_benchmark_reader.yml
  pretrain_weights:
  checkpoints:
  save_inference_dir:

Architecture:
  function: ppocr.modeling.architectures.rec_model,RecModel

# TPS spatial transformer that rectifies the text image before recognition
TPS:
  function: ppocr.modeling.stns.tps,TPS
  num_fiducial: 20
  loc_lr: 0.1
  model_name: small

Backbone:
  function: ppocr.modeling.backbones.rec_mobilenet_v3,MobileNetV3
  scale: 0.5
  model_name: large

# Attention decoder head with an RNN sequence encoder
Head:
  function: ppocr.modeling.heads.rec_attention_head,AttentionPredict
  encoder_type: rnn
  SeqRNN:
    hidden_size: 96
  Attention:
    decoder_size: 96
    word_vector_dim: 96

Loss:
  function: ppocr.modeling.losses.rec_attention_loss,AttentionLoss

Optimizer:
  function: ppocr.optimizer,AdamDecay
  base_lr: 0.001
  beta1: 0.9
  beta2: 0.999