remove det_mv3_db_v1.1.yml & add code annotation
parent 2cdf7a666e
commit b063599418
@@ -24,6 +24,7 @@ Backbone:
  function: ppocr.modeling.backbones.det_mobilenet_v3,MobileNetV3
  scale: 0.5
  model_name: large
  disable_se: true

Head:
  function: ppocr.modeling.heads.det_db_head,DBHead
det_mv3_db_v1.1.yml (removed):
@@ -1,55 +0,0 @@
Global:
  algorithm: DB
  use_gpu: true
  epoch_num: 1200
  log_smooth_window: 20
  print_batch_step: 2
  save_model_dir: ./output/det_db/
  save_epoch_step: 200
  # evaluation is run every 5000 iterations after the 4000th iteration
  eval_batch_step: [4000, 5000]
  train_batch_size_per_card: 16
  test_batch_size_per_card: 16
  image_shape: [3, 640, 640]
  reader_yml: ./configs/det/det_db_icdar15_reader.yml
  pretrain_weights: ./pretrain_models/MobileNetV3_large_x0_5_pretrained/
  checkpoints:
  save_res_path: ./output/det_db/predicts_db.txt
  save_inference_dir:

Architecture:
  function: ppocr.modeling.architectures.det_model,DetModel

Backbone:
  function: ppocr.modeling.backbones.det_mobilenet_v3,MobileNetV3
  scale: 0.5
  model_name: large
  disable_se: true

Head:
  function: ppocr.modeling.heads.det_db_head,DBHead
  model_name: large
  k: 50
  inner_channels: 96
  out_channels: 2

Loss:
  function: ppocr.modeling.losses.det_db_loss,DBLoss
  balance_loss: true
  main_loss_type: DiceLoss
  alpha: 5
  beta: 10
  ohem_ratio: 3

Optimizer:
  function: ppocr.optimizer,AdamDecay
  base_lr: 0.001
  beta1: 0.9
  beta2: 0.999

PostProcess:
  function: ppocr.postprocess.db_postprocess,DBPostProcess
  thresh: 0.3
  box_thresh: 0.6
  max_candidates: 1000
  unclip_ratio: 1.5
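Note on the `function:` entries in the config above: each names a Python module and a class as a comma-separated pair. A minimal sketch of how such a string could be resolved to a class (illustrative only; `resolve_function` is a hypothetical helper, not the repository's actual loader):

import importlib

def resolve_function(spec):
    # Split "package.module,ClassName" into its two parts and import the class.
    module_path, class_name = spec.split(",")
    module = importlib.import_module(module_path.strip())
    return getattr(module, class_name.strip())

# Example (hypothetical usage):
# backbone_cls = resolve_function("ppocr.modeling.backbones.det_mobilenet_v3,MobileNetV3")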
@@ -204,6 +204,15 @@ def build(config, main_prog, startup_prog, mode):

def build_export(config, main_prog, startup_prog):
    """
    Build input and output for exporting a checkpoint model to an inference model
    Args:
        config(dict): config
        main_prog(Program): main program
        startup_prog(Program): startup program
    Returns:
        feeded_var_names(list[str]): var names of input for exported inference model
        target_vars(list[Variable]): output vars for exported inference model
        fetches_var_name: dict of checkpoint model outputs (including loss and metrics)
    """
    with fluid.program_guard(main_prog, startup_prog):
        with fluid.unique_name.guard():
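The values documented in the docstring above are what a caller would hand to fluid's inference-export API. A hedged sketch of that follow-up step (the wrapper and variable names are illustrative, not taken from the repository):

import paddle.fluid as fluid

def export_to_inference_model(exe, program, feeded_var_names, target_vars,
                              save_dir="./inference_model"):
    # Persist the program and its parameters so they can be loaded for inference later.
    fluid.io.save_inference_model(
        dirname=save_dir,
        feeded_var_names=feeded_var_names,  # input variable names from build_export
        target_vars=target_vars,            # output Variables from build_export
        executor=exe,
        main_program=program)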
@@ -246,6 +255,9 @@ def train_eval_det_run(config,
                       train_info_dict,
                       eval_info_dict,
                       is_pruning=False):
    '''
    main program of training and evaluation for detection
    '''
    train_batch_id = 0
    log_smooth_window = config['Global']['log_smooth_window']
    epoch_num = config['Global']['epoch_num']
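`log_smooth_window` controls how many recent steps are averaged before training statistics are printed. A minimal sketch of that kind of windowed smoothing (illustrative only; not the repository's actual stats helper):

from collections import deque

class SmoothedValue:
    def __init__(self, window_size):
        self.values = deque(maxlen=window_size)  # keep only the last N values

    def add(self, value):
        self.values.append(value)

    def mean(self):
        return sum(self.values) / max(len(self.values), 1)

# Example (hypothetical usage):
# smoother = SmoothedValue(log_smooth_window)
# smoother.add(float(loss)); print(smoother.mean())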
@@ -337,6 +349,9 @@ def train_eval_det_run(config,


def train_eval_rec_run(config, exe, train_info_dict, eval_info_dict):
    '''
    main program of training and evaluation for recognition
    '''
    train_batch_id = 0
    log_smooth_window = config['Global']['log_smooth_window']
    epoch_num = config['Global']['epoch_num']
@@ -513,6 +528,7 @@ def train_eval_cls_run(config, exe, train_info_dict, eval_info_dict):


def preprocess():
    # load config from yml file
    FLAGS = ArgsParser().parse_args()
    config = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
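`merge_config(FLAGS.opt)` folds command-line overrides into the YAML config loaded just above. A sketch of how a dotted override key such as `Global.use_gpu` could be applied to the nested dict (`apply_override` is a hypothetical helper that only illustrates the idea; the repository's merge_config may behave differently):

def apply_override(config, dotted_key, value):
    # Walk "Global.use_gpu" down the nested dict and set the final key.
    keys = dotted_key.split(".")
    node = config
    for key in keys[:-1]:
        node = node.setdefault(key, {})
    node[keys[-1]] = value

# Example (hypothetical usage):
# apply_override(config, "Global.use_gpu", False)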
@@ -522,6 +538,7 @@ def preprocess():
    use_gpu = config['Global']['use_gpu']
    check_gpu(use_gpu)

    # check whether the set algorithm belongs to the supported algorithm list
    alg = config['Global']['algorithm']
    assert alg in [
        'EAST', 'DB', 'SAST', 'Rosetta', 'CRNN', 'STARNet', 'RARE', 'SRN', 'CLS'
@@ -46,6 +46,7 @@ from paddle.fluid.contrib.model_stat import summary


def main():
    # build train program
    train_build_outputs = program.build(
        config, train_program, startup_program, mode='train')
    train_loader = train_build_outputs[0]
@@ -54,6 +55,7 @@ def main():
    train_opt_loss_name = train_build_outputs[3]
    model_average = train_build_outputs[-1]

    # build eval program
    eval_program = fluid.Program()
    eval_build_outputs = program.build(
        config, eval_program, startup_program, mode='eval')
@@ -61,9 +63,11 @@ def main():
    eval_fetch_varname_list = eval_build_outputs[2]
    eval_program = eval_program.clone(for_test=True)

    # initialize train reader
    train_reader = reader_main(config=config, mode="train")
    train_loader.set_sample_list_generator(train_reader, places=place)

    # initialize eval reader
    eval_reader = reader_main(config=config, mode="eval")

    exe = fluid.Executor(place)
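With the loaders and the executor wired up as above, a fluid training step is typically a loop over the DataLoader feeding `exe.run`. A hedged sketch of such a loop (names and fetch handling are illustrative, not the repository's exact code):

def run_one_epoch(exe, train_program, train_loader, fetch_name_list):
    # Iterate over batches produced by the reader attached via set_sample_list_generator.
    for data in train_loader():
        outs = exe.run(program=train_program,
                       feed=data,
                       fetch_list=fetch_name_list)  # e.g. loss and metric variables
        loss = outs[0]
        # logging, smoothing, and checkpoint saving would go here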