Merge pull request #291 from LDOUBLEV/fixeval

set start evaluation
This commit is contained in:
Double_V 2020-07-07 20:20:54 +08:00 committed by GitHub
commit 177d8fd9a5
9 changed files with 28 additions and 9 deletions

View File

@ -92,6 +92,7 @@ python3 tools/infer/predict_system.py --image_dir="./doc/imgs/11.jpg" --det_mode
- [Text detection model training/evaluation/prediction](./doc/doc_ch/detection.md)
- [Text recognition model training/evaluation/prediction](./doc/doc_ch/recognition.md)
- [Inference based on the prediction engine](./doc/doc_ch/inference.md)
- [Introduction to yml configuration file parameters](./doc/doc_ch/config_ch.md)
- [Datasets](./doc/doc_ch/datasets.md)
- [FAQ](#FAQ)
- [Contact us](#欢迎加入PaddleOCR技术交流群)

View File

@ -92,7 +92,9 @@ For more text detection and recognition models, please refer to the document [In
- [Text detection model training/evaluation/prediction](./doc/doc_en/detection_en.md)
- [Text recognition model training/evaluation/prediction](./doc/doc_en/recognition_en.md)
- [Inference](./doc/doc_en/inference_en.md)
- [Introduction of yml file](./doc/doc_en/config_en.md)
- [Dataset](./doc/doc_en/datasets_en.md)
- [FAQ](#FAQ)
## TEXT DETECTION ALGORITHM
@ -170,6 +172,7 @@ Please refer to the document for training guide and use of PaddleOCR text recogn
![](doc/imgs_results/chinese_db_crnn_server/2.jpg)
![](doc/imgs_results/chinese_db_crnn_server/8.jpg)
<a name="FAQ"></a>
## FAQ
1. Error when using attention-based recognition model: KeyError: 'predict'

View File

@ -6,7 +6,8 @@ Global:
print_batch_step: 2
save_model_dir: ./output/det_db/
save_epoch_step: 200
eval_batch_step: 5000
# evaluation is run every 5000 iterations after the 4000th iteration
eval_batch_step: [4000, 5000]
train_batch_size_per_card: 16
test_batch_size_per_card: 16
image_shape: [3, 640, 640]
@ -50,4 +51,4 @@ PostProcess:
thresh: 0.3
box_thresh: 0.7
max_candidates: 1000
unclip_ratio: 2.0
unclip_ratio: 2.0
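
A quick illustration (not part of this commit) of which iterations the new `[4000, 5000]` setting would trigger evaluation on, using the same start/interval gating that the training-loop changes below introduce; `eval_iterations` is a hypothetical helper written only for this note:

```python
# Minimal sketch: list the iterations at which evaluation would run,
# assuming the start/interval gating added in this pull request.
def eval_iterations(eval_batch_step, max_iter):
    start_eval_step = 0
    # Accept either a single interval or a [start, interval] pair.
    if isinstance(eval_batch_step, list) and len(eval_batch_step) >= 2:
        start_eval_step, eval_batch_step = eval_batch_step[0], eval_batch_step[1]
    return [step for step in range(1, max_iter + 1)
            if step > start_eval_step
            and (step - start_eval_step) % eval_batch_step == 0]

print(eval_iterations([4000, 5000], 20000))  # [9000, 14000, 19000]
print(eval_iterations(5000, 20000))          # [5000, 10000, 15000, 20000]
```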

View File

@ -6,7 +6,7 @@ Global:
print_batch_step: 5
save_model_dir: ./output/det_east/
save_epoch_step: 200
eval_batch_step: 5000
eval_batch_step: [5000, 5000]
train_batch_size_per_card: 16
test_batch_size_per_card: 16
image_shape: [3, 512, 512]

View File

@ -6,7 +6,7 @@ Global:
print_batch_step: 2
save_model_dir: ./output/det_db/
save_epoch_step: 200
eval_batch_step: 5000
eval_batch_step: [5000, 5000]
train_batch_size_per_card: 8
test_batch_size_per_card: 16
image_shape: [3, 640, 640]

View File

@ -6,7 +6,7 @@ Global:
print_batch_step: 5
save_model_dir: ./output/det_east/
save_epoch_step: 200
eval_batch_step: 5000
eval_batch_step: [5000, 5000]
train_batch_size_per_card: 8
test_batch_size_per_card: 16
image_shape: [3, 512, 512]

View File

@ -22,7 +22,7 @@
| print_batch_step | Set the log printing interval | 10 | \ |
| save_model_dir | Set the model save path | output/{algorithm_name} | \ |
| save_epoch_step | Set the model save interval | 3 | \ |
| eval_batch_step | Set the model evaluation interval | 2000 | \ |
| eval_batch_step | Set the model evaluation interval | 2000 or [1000, 2000] | 2000 means evaluation runs every 2000 iterations; [1000, 2000] means evaluation starts at iteration 1000 and then runs every 2000 iterations |
| train_batch_size_per_card | Set the single-card batch size during training | 256 | \ |
| test_batch_size_per_card | Set the single-card batch size during evaluation | 256 | \ |
| image_shape | Set the input image size | [3, 32, 100] | \ |

View File

@ -22,7 +22,7 @@ Take `rec_chinese_lite_train.yml` as an example
| print_batch_step | Set print log interval | 10 | \ |
| save_model_dir | Set model save path | output/{model_name} | \ |
| save_epoch_step | Set model save interval | 3 | \ |
| eval_batch_step | Set the model evaluation interval | 2000 | \ |
| eval_batch_step | Set the model evaluation interval | 2000 or [1000, 2000] | 2000 means evaluation is run every 2000 iterations; [1000, 2000] means evaluation starts at the 1000th iteration and is run every 2000 iterations thereafter |
| train_batch_size_per_card | Set the batch size during training | 256 | \ |
| test_batch_size_per_card | Set the batch size during testing | 256 | \ |
| image_shape | Set input image size | [3, 32, 100] | \ |
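
For readers applying this setting themselves, here is a small sketch of how the two documented forms could be normalized into a start step and an evaluation interval; `parse_eval_batch_step` is a hypothetical helper, not a PaddleOCR API:

```python
# Hypothetical helper: normalize eval_batch_step into (start_step, interval),
# matching the behaviour described in the table above.
def parse_eval_batch_step(eval_batch_step):
    if isinstance(eval_batch_step, (list, tuple)) and len(eval_batch_step) >= 2:
        return int(eval_batch_step[0]), int(eval_batch_step[1])
    return 0, int(eval_batch_step)

assert parse_eval_batch_step(2000) == (0, 2000)
assert parse_eval_batch_step([1000, 2000]) == (1000, 2000)
```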

View File

@ -219,6 +219,13 @@ def train_eval_det_run(config, exe, train_info_dict, eval_info_dict):
epoch_num = config['Global']['epoch_num']
print_batch_step = config['Global']['print_batch_step']
eval_batch_step = config['Global']['eval_batch_step']
start_eval_step = 0
if type(eval_batch_step) == list and len(eval_batch_step) >= 2:
start_eval_step = eval_batch_step[0]
eval_batch_step = eval_batch_step[1]
logger.info(
"During the training process, after the {}th iteration, an evaluation is run every {} iterations".
format(start_eval_step, eval_batch_step))
save_epoch_step = config['Global']['save_epoch_step']
save_model_dir = config['Global']['save_model_dir']
if not os.path.exists(save_model_dir):
@ -246,7 +253,7 @@ def train_eval_det_run(config, exe, train_info_dict, eval_info_dict):
t2 = time.time()
train_batch_elapse = t2 - t1
train_stats.update(stats)
if train_batch_id > 0 and train_batch_id \
if train_batch_id > start_eval_step and (train_batch_id - start_eval_step) \
% print_batch_step == 0:
logs = train_stats.log()
strs = 'epoch: {}, iter: {}, {}, time: {:.3f}'.format(
@ -286,6 +293,13 @@ def train_eval_rec_run(config, exe, train_info_dict, eval_info_dict):
epoch_num = config['Global']['epoch_num']
print_batch_step = config['Global']['print_batch_step']
eval_batch_step = config['Global']['eval_batch_step']
start_eval_step = 0
if type(eval_batch_step) == list and len(eval_batch_step) >= 2:
start_eval_step = eval_batch_step[0]
eval_batch_step = eval_batch_step[1]
logger.info(
"During the training process, after the {}th iteration, an evaluation is run every {} iterations".
format(start_eval_step, eval_batch_step))
save_epoch_step = config['Global']['save_epoch_step']
save_model_dir = config['Global']['save_model_dir']
if not os.path.exists(save_model_dir):
@ -324,7 +338,7 @@ def train_eval_rec_run(config, exe, train_info_dict, eval_info_dict):
train_batch_elapse = t2 - t1
stats = {'loss': loss, 'acc': acc}
train_stats.update(stats)
if train_batch_id > 0 and train_batch_id \
if train_batch_id > start_eval_step and (train_batch_id - start_eval_step) \
% print_batch_step == 0:
logs = train_stats.log()
strs = 'epoch: {}, iter: {}, lr: {:.6f}, {}, time: {:.3f}'.format(
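
To show the intent of the new gating outside the diff context, a self-contained sketch of the same start/interval pattern in a generic training loop; `train_one_batch` and `run_evaluation` are placeholder callables, not functions from this repository:

```python
# Self-contained sketch of the start/interval evaluation gating introduced here.
def training_loop(config, train_one_batch, run_evaluation, total_iters):
    eval_batch_step = config['Global']['eval_batch_step']
    start_eval_step = 0
    if isinstance(eval_batch_step, list) and len(eval_batch_step) >= 2:
        start_eval_step, eval_batch_step = eval_batch_step[0], eval_batch_step[1]
    for train_batch_id in range(1, total_iters + 1):
        train_one_batch(train_batch_id)
        # Skip evaluation until start_eval_step has passed, then evaluate
        # every eval_batch_step iterations measured from that point.
        if train_batch_id > start_eval_step and \
                (train_batch_id - start_eval_step) % eval_batch_step == 0:
            run_evaluation(train_batch_id)
```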