From 6a38af589028fe38cd8c165598519ce1f8d8ed45 Mon Sep 17 00:00:00 2001 From: WenmuZhou Date: Thu, 28 Jan 2021 11:03:58 +0800 Subject: [PATCH 01/77] fix starnet export --- tools/export_model.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tools/export_model.py b/tools/export_model.py index b7d61a59..a2428bf7 100755 --- a/tools/export_model.py +++ b/tools/export_model.py @@ -47,14 +47,18 @@ def main(): char_num = len(getattr(post_process_class, 'character')) config['Architecture']["Head"]['out_channels'] = char_num model = build_model(config['Architecture']) - init_model(config, model, logger) + # init_model(config, model, logger) model.eval() save_path = '{}/inference'.format(config['Global']['save_inference_dir']) - infer_shape = [3, -1, -1] + infer_shape = [3, -1, -1] if config['Architecture']['model_type'] == "rec": - infer_shape = [3, 32, -1] + infer_shape = [3, 32, -1] + if 'Transform' in config['Architecture'] and config['Architecture'][ + 'Transform'] is not None and config['Architecture'][ + 'Transform']['name'] == 'TPS': + infer_shape[-1] = 100 model = to_static( model, From b30add8ae51e51cc085750fa7f3a5f27a7f6f5ce Mon Sep 17 00:00:00 2001 From: WenmuZhou Date: Thu, 28 Jan 2021 11:05:28 +0800 Subject: [PATCH 02/77] fix bug --- tools/export_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/export_model.py b/tools/export_model.py index a2428bf7..542dffe3 100755 --- a/tools/export_model.py +++ b/tools/export_model.py @@ -47,7 +47,7 @@ def main(): char_num = len(getattr(post_process_class, 'character')) config['Architecture']["Head"]['out_channels'] = char_num model = build_model(config['Architecture']) - # init_model(config, model, logger) + init_model(config, model, logger) model.eval() save_path = '{}/inference'.format(config['Global']['save_inference_dir']) From 8e697e349fdb17d7fb7148ed1f79bec08936e207 Mon Sep 17 00:00:00 2001 From: WenmuZhou Date: Thu, 28 Jan 2021 13:21:10 +0800 Subject: [PATCH 03/77] add note --- tools/export_model.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/export_model.py b/tools/export_model.py index 542dffe3..a9b9e7dd 100755 --- a/tools/export_model.py +++ b/tools/export_model.py @@ -54,10 +54,13 @@ def main(): infer_shape = [3, -1, -1] if config['Architecture']['model_type'] == "rec": - infer_shape = [3, 32, -1] + infer_shape = [3, 32, -1] # for rec model, H must be 32 if 'Transform' in config['Architecture'] and config['Architecture'][ 'Transform'] is not None and config['Architecture'][ 'Transform']['name'] == 'TPS': + logger.info( + 'When there is tps in the network, variable length input is not supported, and the input size needs to be the same as during training' + ) infer_shape[-1] = 100 model = to_static( From 2a53789d1d180c09341a4d7f765cf64f15a436df Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Thu, 28 Jan 2021 19:12:44 +0800 Subject: [PATCH 04/77] fix doc of quant demo --- deploy/slim/quantization/README.md | 2 +- deploy/slim/quantization/README_en.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/slim/quantization/README.md b/deploy/slim/quantization/README.md index ccd4d06b..00cc7c58 100644 --- a/deploy/slim/quantization/README.md +++ b/deploy/slim/quantization/README.md @@ -42,7 +42,7 @@ python deploy/slim/quantization/quant.py -c configs/det/det_mv3_db.yml -o Global # 比如下载提供的训练模型 wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar tar -xf ch_ppocr_mobile_v2.0_det_train.tar -python 
deploy/slim/quantization/quant.py -c configs/det/det_mv3_db.yml -o Global.pretrain_weights=./ch_ppocr_mobile_v2.0_det_train/best_accuracy Global.save_model_dir=./output/quant_model +python deploy/slim/quantization/quant.py -c configs/det/det_mv3_db.yml -o Global.pretrain_weights=./ch_ppocr_mobile_v2.0_det_train/best_accuracy Global.save_inference_dir=./output/quant_model ``` 如果要训练识别模型的量化,修改配置文件和加载的模型参数即可。 diff --git a/deploy/slim/quantization/README_en.md b/deploy/slim/quantization/README_en.md index 7da0b3e7..36407a2b 100644 --- a/deploy/slim/quantization/README_en.md +++ b/deploy/slim/quantization/README_en.md @@ -58,7 +58,7 @@ python deploy/slim/quantization/quant.py -c configs/det/det_mv3_db.yml -o Global After getting the model after pruning and finetuning we, can export it as inference_model for predictive deployment: ```bash -python deploy/slim/quantization/export_model.py -c configs/det/det_mv3_db.yml -o Global.checkpoints=output/quant_model/best_accuracy Global.save_model_dir=./output/quant_inference_model +python deploy/slim/quantization/export_model.py -c configs/det/det_mv3_db.yml -o Global.checkpoints=output/quant_model/best_accuracy Global.save_inference_dir=./output/quant_inference_model ``` ### 5. Deploy From de37eedf26254ea3c5909a871191c06303b11a1c Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Thu, 28 Jan 2021 19:13:50 +0800 Subject: [PATCH 05/77] fix doc of quant demo --- deploy/slim/quantization/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/slim/quantization/README.md b/deploy/slim/quantization/README.md index 00cc7c58..4ac3f7c3 100644 --- a/deploy/slim/quantization/README.md +++ b/deploy/slim/quantization/README.md @@ -42,7 +42,7 @@ python deploy/slim/quantization/quant.py -c configs/det/det_mv3_db.yml -o Global # 比如下载提供的训练模型 wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar tar -xf ch_ppocr_mobile_v2.0_det_train.tar -python deploy/slim/quantization/quant.py -c configs/det/det_mv3_db.yml -o Global.pretrain_weights=./ch_ppocr_mobile_v2.0_det_train/best_accuracy Global.save_inference_dir=./output/quant_model +python deploy/slim/quantization/quant.py -c configs/det/det_mv3_db.yml -o Global.pretrain_weights=./ch_ppocr_mobile_v2.0_det_train/best_accuracy Global.save_inference_dir=./output/quant_inference_model ``` 如果要训练识别模型的量化,修改配置文件和加载的模型参数即可。 From ea4577adb78ee5b245916e4f1c6b46fbd4a5ff57 Mon Sep 17 00:00:00 2001 From: WenmuZhou Date: Fri, 29 Jan 2021 10:11:23 +0800 Subject: [PATCH 06/77] update docker --- deploy/docker/hubserving/gpu/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/docker/hubserving/gpu/Dockerfile b/deploy/docker/hubserving/gpu/Dockerfile index b7fa6f4c..144e1b35 100644 --- a/deploy/docker/hubserving/gpu/Dockerfile +++ b/deploy/docker/hubserving/gpu/Dockerfile @@ -1,5 +1,5 @@ # Version: 2.0.0 -FROM egistry.baidubce.com/paddlepaddle/paddle:2.0.0rc1-gpu-cuda10.0-cudnn7 +FROM registry.baidubce.com/paddlepaddle/paddle:2.0.0rc1-gpu-cuda10.0-cudnn7 # PaddleOCR base on Python3.7 RUN pip3.7 install --upgrade pip -i https://mirror.baidu.com/pypi/simple From 8fb01540e7dd55b37456552b4971be82f4612bd5 Mon Sep 17 00:00:00 2001 From: WenmuZhou Date: Fri, 29 Jan 2021 10:24:51 +0800 Subject: [PATCH 07/77] add paddle install --- deploy/docker/hubserving/cpu/Dockerfile | 2 ++ deploy/docker/hubserving/gpu/Dockerfile | 2 ++ 2 files changed, 4 insertions(+) diff --git a/deploy/docker/hubserving/cpu/Dockerfile b/deploy/docker/hubserving/cpu/Dockerfile index 
e46ca73b..62a01bee 100644 --- a/deploy/docker/hubserving/cpu/Dockerfile +++ b/deploy/docker/hubserving/cpu/Dockerfile @@ -4,6 +4,8 @@ FROM registry.baidubce.com/paddlepaddle/paddle:2.0.0rc1 # PaddleOCR base on Python3.7 RUN pip3.7 install --upgrade pip -i https://mirror.baidu.com/pypi/simple +RUN python3.7 -m pip install paddlepaddle==2.0.0 -i https://mirror.baidu.com/pypi/simple + RUN pip3.7 install paddlehub --upgrade -i https://mirror.baidu.com/pypi/simple RUN git clone https://github.com/PaddlePaddle/PaddleOCR.git /PaddleOCR diff --git a/deploy/docker/hubserving/gpu/Dockerfile b/deploy/docker/hubserving/gpu/Dockerfile index 144e1b35..7a80bc56 100644 --- a/deploy/docker/hubserving/gpu/Dockerfile +++ b/deploy/docker/hubserving/gpu/Dockerfile @@ -4,6 +4,8 @@ FROM registry.baidubce.com/paddlepaddle/paddle:2.0.0rc1-gpu-cuda10.0-cudnn7 # PaddleOCR base on Python3.7 RUN pip3.7 install --upgrade pip -i https://mirror.baidu.com/pypi/simple +RUN python3.7 -m pip install paddlepaddle-gpu==2.0.0 -i https://mirror.baidu.com/pypi/simple + RUN pip3.7 install paddlehub --upgrade -i https://mirror.baidu.com/pypi/simple RUN git clone https://github.com/PaddlePaddle/PaddleOCR.git /PaddleOCR From f6e03a51f0d958e772027d788badc395137f9f12 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Fri, 29 Jan 2021 03:15:03 +0000 Subject: [PATCH 08/77] upload rare code --- configs/rec/rec_mv3_tps_bilstm_att.yml | 102 +++++++++++ configs/rec/rec_r34_vd_tps_bilstm_att.yml | 103 +++++++++++ ppocr/data/imaug/label_ops.py | 24 ++- ppocr/losses/__init__.py | 5 +- ppocr/losses/rec_att_loss.py | 39 ++++ ppocr/modeling/heads/__init__.py | 5 +- ppocr/modeling/heads/rec_att_head.py | 211 ++++++++++++++++++++++ ppocr/postprocess/__init__.py | 3 +- ppocr/postprocess/rec_postprocess.py | 46 ++++- 9 files changed, 525 insertions(+), 13 deletions(-) create mode 100644 configs/rec/rec_mv3_tps_bilstm_att.yml create mode 100644 configs/rec/rec_r34_vd_tps_bilstm_att.yml create mode 100644 ppocr/losses/rec_att_loss.py create mode 100644 ppocr/modeling/heads/rec_att_head.py diff --git a/configs/rec/rec_mv3_tps_bilstm_att.yml b/configs/rec/rec_mv3_tps_bilstm_att.yml new file mode 100644 index 00000000..c64b2ccc --- /dev/null +++ b/configs/rec/rec_mv3_tps_bilstm_att.yml @@ -0,0 +1,102 @@ +Global: + use_gpu: true + epoch_num: 72 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec/rec_mv3_tps_bilstm_att/ + save_epoch_step: 3 + # evaluation is run every 5000 iterations after the 4000th iteration + eval_batch_step: [0, 2000] + # if pretrained_model is saved in static mode, load_static_weights must set to True + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words/ch/word_1.jpg + # for data or label process + character_dict_path: + character_type: en + max_text_length: 25 + infer_mode: False + use_space_char: False + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + learning_rate: 0.0005 + regularizer: + name: 'L2' + factor: 0.00001 + +Architecture: + model_type: rec + algorithm: RARE + Transform: + name: TPS + num_fiducial: 20 + loc_lr: 0.1 + model_name: small + Backbone: + name: MobileNetV3 + scale: 0.5 + model_name: large + Neck: + name: SequenceEncoder + encoder_type: rnn + hidden_size: 96 + Head: + name: AttentionHead + hidden_size: 96 + + +Loss: + name: AttentionLoss + +PostProcess: + name: AttnLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: LMDBDateSet + data_dir: ../training/ + 
transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - AttnLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 100] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 256 + drop_last: True + num_workers: 8 + +Eval: + dataset: + name: LMDBDateSet + data_dir: ../validation/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - AttnLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 100] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 1 diff --git a/configs/rec/rec_r34_vd_tps_bilstm_att.yml b/configs/rec/rec_r34_vd_tps_bilstm_att.yml new file mode 100644 index 00000000..f42bfdcc --- /dev/null +++ b/configs/rec/rec_r34_vd_tps_bilstm_att.yml @@ -0,0 +1,103 @@ +Global: + use_gpu: true + epoch_num: 400 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec/b3_rare_r34_none_gru/ + save_epoch_step: 3 + # evaluation is run every 5000 iterations after the 4000th iteration + eval_batch_step: [0, 2000] + # if pretrained_model is saved in static mode, load_static_weights must set to True + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words/ch/word_1.jpg + # for data or label process + character_dict_path: + character_type: en + max_text_length: 25 + infer_mode: False + use_space_char: False + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + learning_rate: 0.0005 + regularizer: + name: 'L2' + factor: 0.00000 + +Architecture: + model_type: rec + algorithm: RARE + Transform: + name: TPS + num_fiducial: 20 + loc_lr: 0.1 + model_name: large + Backbone: + name: ResNet + layers: 34 + + Neck: + name: SequenceEncoder + encoder_type: rnn + hidden_size: 256 #96 + Head: + name: AttentionHead # AttentionHead + hidden_size: 256 # + l2_decay: 0.00001 + + +Loss: + name: AttentionLoss + +PostProcess: + name: AttnLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: LMDBDateSet + data_dir: ../training/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - AttnLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 100] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 256 + drop_last: True + num_workers: 8 + +Eval: + dataset: + name: LMDBDateSet + data_dir: ../validation/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - AttnLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 100] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 8 diff --git a/ppocr/data/imaug/label_ops.py b/ppocr/data/imaug/label_ops.py index 14c1cc9c..6d9ea190 100644 --- a/ppocr/data/imaug/label_ops.py +++ b/ppocr/data/imaug/label_ops.py @@ -197,16 +197,30 @@ class AttnLabelEncode(BaseRecLabelEncode): super(AttnLabelEncode, self).__init__(max_text_length, character_dict_path, character_type, use_space_char) - self.beg_str = "sos" - self.end_str = "eos" def add_special_char(self, dict_character): - 
dict_character = [self.beg_str, self.end_str] + dict_character + self.beg_str = "sos" + self.end_str = "eos" + dict_character = [self.beg_str] + dict_character + [self.end_str] return dict_character - def __call__(self, text): + def __call__(self, data): + text = data['label'] text = self.encode(text) - return text + if text is None: + return None + if len(text) > self.max_text_len: + return None + data['length'] = np.array(len(text)) + text = [0] + text + [len(self.character) - 1] + [0] * (self.max_text_len + - len(text) - 1) + data['label'] = np.array(text) + return data + + def get_ignored_tokens(self): + beg_idx = self.get_beg_end_flag_idx("beg") + end_idx = self.get_beg_end_flag_idx("end") + return [beg_idx, end_idx] def get_beg_end_flag_idx(self, beg_or_end): if beg_or_end == "beg": diff --git a/ppocr/losses/__init__.py b/ppocr/losses/__init__.py index 4673d35c..94314235 100755 --- a/ppocr/losses/__init__.py +++ b/ppocr/losses/__init__.py @@ -23,11 +23,14 @@ def build_loss(config): # rec loss from .rec_ctc_loss import CTCLoss + from .rec_att_loss import AttentionLoss # cls loss from .cls_loss import ClsLoss - support_dict = ['DBLoss', 'EASTLoss', 'SASTLoss', 'CTCLoss', 'ClsLoss'] + support_dict = [ + 'DBLoss', 'EASTLoss', 'SASTLoss', 'CTCLoss', 'ClsLoss', 'AttentionLoss' + ] config = copy.deepcopy(config) module_name = config.pop('name') diff --git a/ppocr/losses/rec_att_loss.py b/ppocr/losses/rec_att_loss.py new file mode 100644 index 00000000..6e2f6748 --- /dev/null +++ b/ppocr/losses/rec_att_loss.py @@ -0,0 +1,39 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
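# --- Editor's note: illustration only, NOT part of the patch above or below ---
# The AttnLabelEncode change shown earlier in this patch builds the dictionary as
# ['sos'] + characters + ['eos'] and pads each label as
#   [0] + text + [len(self.character) - 1] + [0] * (max_text_len - len(text) - 1),
# i.e. index 0 is the 'sos' token, the last index is 'eos', and zeros pad the tail.
# A minimal standalone sketch; the toy alphabet and max_text_len = 8 are assumed
# values for demonstration, not values taken from the patch.
character = ['sos', 'a', 'b', 'c', 'd', 'eos']   # what add_special_char would return for ['a'..'d']
max_text_len = 8

def encode_attn_label(text_ids):
    # mirrors the padding scheme introduced in AttnLabelEncode.__call__
    return [0] + text_ids + [len(character) - 1] + [0] * (max_text_len - len(text_ids) - 1)

print(encode_attn_label([1, 2, 3]))  # "abc" -> [0, 1, 2, 3, 5, 0, 0, 0, 0]  (sos, a, b, c, eos, padding)
# --- end editor's note ---------------------------------------------------------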
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import paddle +from paddle import nn + + +class AttentionLoss(nn.Layer): + def __init__(self, **kwargs): + super(AttentionLoss, self).__init__() + self.loss_func = nn.CrossEntropyLoss(weight=None, reduction='none') + + def forward(self, predicts, batch): + targets = batch[1].astype("int64") + label_lengths = batch[2].astype('int64') + batch_size, num_steps, num_classes = predicts.shape[0], predicts.shape[ + 1], predicts.shape[2] + assert len(targets.shape) == len(list(predicts.shape)) - 1, \ + "The target's shape and inputs's shape is [N, d] and [N, num_steps]" + + inputs = paddle.reshape(predicts, [-1, predicts.shape[-1]]) + targets = paddle.reshape(targets, [-1]) + + return {'loss': paddle.sum(self.loss_func(inputs, targets))} diff --git a/ppocr/modeling/heads/__init__.py b/ppocr/modeling/heads/__init__.py index 78074709..29d0ba80 100755 --- a/ppocr/modeling/heads/__init__.py +++ b/ppocr/modeling/heads/__init__.py @@ -23,10 +23,13 @@ def build_head(config): # rec head from .rec_ctc_head import CTCHead + from .rec_att_head import AttentionHead # cls head from .cls_head import ClsHead - support_dict = ['DBHead', 'EASTHead', 'SASTHead', 'CTCHead', 'ClsHead'] + support_dict = [ + 'DBHead', 'EASTHead', 'SASTHead', 'CTCHead', 'ClsHead', 'AttentionHead' + ] module_name = config.pop('name') assert module_name in support_dict, Exception('head only support {}'.format( diff --git a/ppocr/modeling/heads/rec_att_head.py b/ppocr/modeling/heads/rec_att_head.py new file mode 100644 index 00000000..d01f0e6c --- /dev/null +++ b/ppocr/modeling/heads/rec_att_head.py @@ -0,0 +1,211 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
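# --- Editor's note: illustration only, NOT part of the patch above or below ---
# Shape check for the AttentionLoss added earlier in this patch: the attention head
# emits [N, num_steps, num_classes] logits, the batch carries [N, num_steps] padded
# label ids, and the loss flattens both before CrossEntropyLoss and sums the result.
# A minimal sketch; N = 2, num_steps = 5 and num_classes = 38 are assumed values
# chosen for demonstration, not values taken from the patch.
import paddle
from paddle import nn

N, num_steps, num_classes = 2, 5, 38
predicts = paddle.randn([N, num_steps, num_classes])      # stands in for the head's output
targets = paddle.randint(0, num_classes, [N, num_steps])  # stands in for padded sos/eos label ids

loss_func = nn.CrossEntropyLoss(weight=None, reduction='none')  # same call as in AttentionLoss
inputs = paddle.reshape(predicts, [-1, num_classes])            # [N * num_steps, num_classes]
flat_targets = paddle.reshape(targets, [-1])                    # [N * num_steps]
loss = paddle.sum(loss_func(inputs, flat_targets))              # summed, as in AttentionLoss.forward
print(float(loss))
# --- end editor's note ---------------------------------------------------------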
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +import numpy as np +from paddle.jit import to_static + + +class AttentionHead(nn.Layer): + def __init__(self, in_channels, out_channels, hidden_size, **kwargs): + super(AttentionHead, self).__init__() + self.input_size = in_channels + self.hidden_size = hidden_size + self.num_classes = out_channels + + self.attention_cell = AttentionGRUCell( + in_channels, hidden_size, out_channels, use_gru=False) + self.generator = nn.Linear(hidden_size, out_channels) + + def _char_to_onehot(self, input_char, onehot_dim): + input_ont_hot = F.one_hot(input_char, onehot_dim) + return input_ont_hot + + def forward(self, inputs, targets=None, batch_max_length=25): + batch_size = inputs.shape[0] + num_steps = batch_max_length + + hidden = paddle.zeros((batch_size, self.hidden_size)) + output_hiddens = [] + + if targets is not None: + for i in range(num_steps): + char_onehots = self._char_to_onehot( + targets[:, i], onehot_dim=self.num_classes) + (outputs, hidden), alpha = self.attention_cell(hidden, inputs, + char_onehots) + output_hiddens.append(paddle.unsqueeze(outputs, axis=1)) + output = paddle.concat(output_hiddens, axis=1) + probs = self.generator(output) + + else: + targets = paddle.zeros(shape=[batch_size], dtype="int32") + probs = None + + for i in range(num_steps): + char_onehots = self._char_to_onehot( + targets, onehot_dim=self.num_classes) + (outputs, hidden), alpha = self.attention_cell(hidden, inputs, + char_onehots) + probs_step = self.generator(outputs) + probs = paddle.unsqueeze( + probs_step, axis=1) if probs is None else paddle.concat( + [probs, paddle.unsqueeze( + probs_step, axis=1)], axis=1) + next_input = probs_step.argmax(axis=1) + targets = next_input + + return probs + + +class AttentionGRUCell(nn.Layer): + def __init__(self, input_size, hidden_size, num_embeddings, use_gru=False): + super(AttentionGRUCell, self).__init__() + self.i2h = nn.Linear(input_size, hidden_size, bias_attr=False) + self.h2h = nn.Linear(hidden_size, hidden_size) + self.score = nn.Linear(hidden_size, 1, bias_attr=False) + + self.rnn = nn.GRUCell( + input_size=input_size + num_embeddings, hidden_size=hidden_size) + + self.hidden_size = hidden_size + + def forward(self, prev_hidden, batch_H, char_onehots): + + batch_H_proj = self.i2h(batch_H) + prev_hidden_proj = paddle.unsqueeze(self.h2h(prev_hidden), axis=1) + + res = paddle.add(batch_H_proj, prev_hidden_proj) + res = paddle.tanh(res) + e = self.score(res) + + alpha = F.softmax(e, axis=1) + alpha = paddle.transpose(alpha, [0, 2, 1]) + context = paddle.squeeze(paddle.mm(alpha, batch_H), axis=1) + concat_context = paddle.concat([context, char_onehots], 1) + + cur_hidden = self.rnn(concat_context, prev_hidden) + + return cur_hidden, alpha + + +class AttentionLSTM(nn.Layer): + def __init__(self, in_channels, out_channels, hidden_size, **kwargs): + super(AttentionLSTM, self).__init__() + self.input_size = in_channels + self.hidden_size = hidden_size + self.num_classes = out_channels + + self.attention_cell = AttentionLSTMCell( + in_channels, hidden_size, out_channels, use_gru=False) + self.generator = nn.Linear(hidden_size, out_channels) + + def _char_to_onehot(self, input_char, onehot_dim): + input_ont_hot = F.one_hot(input_char, onehot_dim) + return input_ont_hot + + def forward(self, inputs, targets=None, batch_max_length=25): + batch_size = inputs.shape[0] + num_steps = 
batch_max_length + + hidden = (paddle.zeros((batch_size, self.hidden_size)), paddle.zeros( + (batch_size, self.hidden_size))) + output_hiddens = [] + + if targets is not None: + for i in range(num_steps): + # one-hot vectors for a i-th char + char_onehots = self._char_to_onehot( + targets[:, i], onehot_dim=self.num_classes) + hidden, alpha = self.attention_cell(hidden, inputs, + char_onehots) + + hidden = (hidden[1][0], hidden[1][1]) + output_hiddens.append(paddle.unsqueeze(hidden[0], axis=1)) + output = paddle.concat(output_hiddens, axis=1) + probs = self.generator(output) + + else: + targets = paddle.zeros(shape=[batch_size], dtype="int32") + probs = None + + for i in range(num_steps): + char_onehots = self._char_to_onehot( + targets, onehot_dim=self.num_classes) + hidden, alpha = self.attention_cell(hidden, inputs, + char_onehots) + probs_step = self.generator(hidden[0]) + hidden = (hidden[1][0], hidden[1][1]) + probs = paddle.unsqueeze( + probs_step, axis=1) if probs is None else paddle.concat( + [probs, paddle.unsqueeze( + probs_step, axis=1)], axis=1) + + next_input = probs_step.argmax(axis=1) + + targets = next_input + + return probs + + +class AttentionLSTMCell(nn.Layer): + def __init__(self, input_size, hidden_size, num_embeddings, use_gru=False): + super(AttentionLSTMCell, self).__init__() + self.i2h = nn.Linear(input_size, hidden_size, bias_attr=False) + self.h2h = nn.Linear(hidden_size, hidden_size) + self.score = nn.Linear(hidden_size, 1, bias_attr=False) + if not use_gru: + self.rnn = nn.LSTMCell( + input_size=input_size + num_embeddings, hidden_size=hidden_size) + else: + self.rnn = nn.GRUCell( + input_size=input_size + num_embeddings, hidden_size=hidden_size) + + self.hidden_size = hidden_size + + def forward(self, prev_hidden, batch_H, char_onehots): + batch_H_proj = self.i2h(batch_H) + prev_hidden_proj = paddle.unsqueeze(self.h2h(prev_hidden[0]), axis=1) + res = paddle.add(batch_H_proj, prev_hidden_proj) + res = paddle.tanh(res) + e = self.score(res) + + alpha = F.softmax(e, axis=1) + alpha = paddle.transpose(alpha, [0, 2, 1]) + context = paddle.squeeze(paddle.mm(alpha, batch_H), axis=1) + concat_context = paddle.concat([context, char_onehots], 1) + cur_hidden = self.rnn(concat_context, prev_hidden) + + return cur_hidden, alpha + + +if __name__ == '__main__': + paddle.disable_static() + + model = Attention(100, 200, 10) + + x = np.random.uniform(-1, 1, [2, 10, 100]).astype(np.float32) + y = np.random.randint(0, 10, [2, 21]).astype(np.int32) + + xp = paddle.to_tensor(x) + yp = paddle.to_tensor(y) + + res = model(inputs=xp, targets=yp, is_train=True, batch_max_length=20) + print("res: ", res.shape) diff --git a/ppocr/postprocess/__init__.py b/ppocr/postprocess/__init__.py index c9b42e08..2b8d00a9 100644 --- a/ppocr/postprocess/__init__.py +++ b/ppocr/postprocess/__init__.py @@ -30,7 +30,8 @@ def build_post_process(config, global_config=None): from .cls_postprocess import ClsPostProcess support_dict = [ - 'DBPostProcess', 'EASTPostProcess', 'SASTPostProcess', 'CTCLabelDecode', 'AttnLabelDecode', 'ClsPostProcess' + 'DBPostProcess', 'EASTPostProcess', 'SASTPostProcess', 'CTCLabelDecode', + 'AttnLabelDecode', 'ClsPostProcess', 'AttnLabelDecode' ] config = copy.deepcopy(config) diff --git a/ppocr/postprocess/rec_postprocess.py b/ppocr/postprocess/rec_postprocess.py index 65ed4671..1ac35246 100644 --- a/ppocr/postprocess/rec_postprocess.py +++ b/ppocr/postprocess/rec_postprocess.py @@ -133,16 +133,52 @@ class AttnLabelDecode(BaseRecLabelDecode): **kwargs): super(AttnLabelDecode, 
self).__init__(character_dict_path, character_type, use_space_char) - self.beg_str = "sos" - self.end_str = "eos" def add_special_char(self, dict_character): - dict_character = [self.beg_str, self.end_str] + dict_character + self.beg_str = "sos" + self.end_str = "eos" + dict_character = dict_character + dict_character = [self.beg_str] + dict_character + [self.end_str] return dict_character - def __call__(self, text): + def __call__(self, preds, label=None, *args, **kwargs): + """ text = self.decode(text) - return text + if label is None: + return text + else: + label = self.decode(label, is_remove_duplicate=False) + return text, label + """ + if isinstance(preds, paddle.Tensor): + preds = preds.numpy() + + preds_idx = preds.argmax(axis=2) + preds_prob = preds.max(axis=2) + text = self.decode(preds_idx, preds_prob, is_remove_duplicate=True) + if label is None: + return text + label = self.decode(label, is_remove_duplicate=True) + return text, label + + def encoder(self, labels, labels_length): + """ + used to encoder labels readed from LMDB dataset, forexample: + [35, 25, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] encode to + 'you': [0, 35,25,31, 37, 0, ...] 'sos'you'eos' + """ + if isinstance(labels, paddle.Tensor): + labels = labels.numpy() + batch_max_length = labels.shape[ + 1] + 2 # add start token 'sos' and end token 'eos' + new_labels = np.zeros( + [labels.shape[0], batch_max_length]).astype(np.int64) + for i in range(labels.shape[0]): + new_labels[i, 1:1 + labels_length[i]] = labels[i, :labels_length[ + i]] # new_labels[i, 0] = 'sos' token + new_labels[i, labels_length[i] + 1] = len( + self.character) - 1 # add end charactor 'eos' token + return new_labels def get_ignored_tokens(self): beg_idx = self.get_beg_end_flag_idx("beg") From 42fe741ff18381df2fc00b665f0b4585ab065fd7 Mon Sep 17 00:00:00 2001 From: tink2123 Date: Fri, 29 Jan 2021 15:08:58 +0800 Subject: [PATCH 09/77] add srn doc --- doc/doc_ch/algorithm_overview.md | 4 ++-- doc/doc_ch/inference.md | 21 +++++++++++++++++---- doc/doc_ch/recognition.md | 2 ++ doc/doc_en/algorithm_overview_en.md | 4 ++-- doc/doc_en/inference_en.md | 20 ++++++++++++++++++-- doc/doc_en/recognition_en.md | 1 + 6 files changed, 42 insertions(+), 10 deletions(-) diff --git a/doc/doc_ch/algorithm_overview.md b/doc/doc_ch/algorithm_overview.md index 59d1bc8c..f0765695 100755 --- a/doc/doc_ch/algorithm_overview.md +++ b/doc/doc_ch/algorithm_overview.md @@ -41,7 +41,7 @@ PaddleOCR基于动态图开源的文本识别算法列表: - [x] Rosetta([paper](https://arxiv.org/abs/1910.05085))[10] - [x] STAR-Net([paper](http://www.bmva.org/bmvc/2016/papers/paper043/index.html))[11] - [ ] RARE([paper](https://arxiv.org/abs/1603.03915v1))[12] coming soon -- [ ] SRN([paper](https://arxiv.org/abs/2003.12294))[5] coming soon +- [x] SRN([paper](https://arxiv.org/abs/2003.12294))[5] 参考[DTRB][3](https://arxiv.org/abs/1904.01906)文字识别训练和评估流程,使用MJSynth和SynthText两个文字识别数据集训练,在IIIT, SVT, IC03, IC13, IC15, SVTP, CUTE数据集上进行评估,算法效果如下: @@ -53,5 +53,5 @@ PaddleOCR基于动态图开源的文本识别算法列表: |CRNN|MobileNetV3|79.97%|rec_mv3_none_bilstm_ctc|[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_none_bilstm_ctc_v2.0_train.tar)| |StarNet|Resnet34_vd|84.44%|rec_r34_vd_tps_bilstm_ctc|[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_ctc_v2.0_train.tar)| |StarNet|MobileNetV3|81.42%|rec_mv3_tps_bilstm_ctc|[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_ctc_v2.0_train.tar)| - +|SRN|Resnet50_vd_fpn| 88.52% | rec_r50fpn_vd_none_srn | 
[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r50_vd_srn_train.tar) | PaddleOCR文本识别算法的训练和使用请参考文档教程中[模型训练/评估中的文本识别部分](./recognition.md)。 diff --git a/doc/doc_ch/inference.md b/doc/doc_ch/inference.md index c4601e15..0daddd9b 100755 --- a/doc/doc_ch/inference.md +++ b/doc/doc_ch/inference.md @@ -22,8 +22,9 @@ inference 模型(`paddle.jit.save`保存的模型) - [三、文本识别模型推理](#文本识别模型推理) - [1. 超轻量中文识别模型推理](#超轻量中文识别模型推理) - [2. 基于CTC损失的识别模型推理](#基于CTC损失的识别模型推理) - - [3. 自定义文本识别字典的推理](#自定义文本识别字典的推理) - - [4. 多语言模型的推理](#多语言模型的推理) + - [3. 基于SRN损失的识别模型推理](#基于SRN损失的识别模型推理) + - [4. 自定义文本识别字典的推理](#自定义文本识别字典的推理) + - [5. 多语言模型的推理](#多语言模型的推理) - [四、方向分类模型推理](#方向识别模型推理) - [1. 方向分类模型推理](#方向分类模型推理) @@ -295,8 +296,20 @@ Predicts of ./doc/imgs_words_en/word_336.png:('super', 0.9999073) self.character_str = "0123456789abcdefghijklmnopqrstuvwxyz" dict_character = list(self.character_str) ``` + +### 3. 基于SRN损失的识别模型推理 +基于SRN损失的识别模型,需要额外设置识别算法参数 --rec_algorithm="SRN"。 +同时需要保证预测shape与训练时一致,如: --rec_image_shape="1, 64, 256" -### 3. 自定义文本识别字典的推理 +``` +python3 tools/infer/predict_rec.py --image_dir="./doc/imgs_words_en/word_336.png" \ + --rec_model_dir="./inference/srn/" \ + --rec_image_shape="1, 64, 256" \ + --rec_char_type="en" \ + --rec_algorithm="SRN" +``` + +### 4. 自定义文本识别字典的推理 如果训练时修改了文本的字典,在使用inference模型预测时,需要通过`--rec_char_dict_path`指定使用的字典路径,并且设置 `rec_char_type=ch` ``` @@ -304,7 +317,7 @@ python3 tools/infer/predict_rec.py --image_dir="./doc/imgs_words_en/word_336.png ``` -### 4. 多语言模型的推理 +### 5. 多语言模型的推理 如果您需要预测的是其他语言模型,在使用inference模型预测时,需要通过`--rec_char_dict_path`指定使用的字典路径, 同时为了得到正确的可视化结果, 需要通过 `--vis_font_path` 指定可视化的字体路径,`doc/fonts/` 路径下有默认提供的小语种字体,例如韩文识别: diff --git a/doc/doc_ch/recognition.md b/doc/doc_ch/recognition.md index c5f459bd..bc877ab7 100644 --- a/doc/doc_ch/recognition.md +++ b/doc/doc_ch/recognition.md @@ -36,6 +36,7 @@ ln -sf /train_data/dataset * 数据下载 若您本地没有数据集,可以在官网下载 [icdar2015](http://rrc.cvc.uab.es/?ch=4&com=downloads) 数据,用于快速验证。也可以参考[DTRB](https://github.com/clovaai/deep-text-recognition-benchmark#download-lmdb-dataset-for-traininig-and-evaluation-from-here),下载 benchmark 所需的lmdb格式数据集。 +如果希望复现SRN的论文指标,需要下载离线[增广数据](https://pan.baidu.com/s/1-HSZ-ZVdqBF2HaBZ5pRAKA),提取码: y3ry。增广数据是由MJSynth和SynthText做旋转和扰动得到的。数据下载完成后请解压到 {your_path}/PaddleOCR/train_data/data_lmdb_release/training/ 路径下。 * 使用自己数据集 @@ -200,6 +201,7 @@ PaddleOCR支持训练和评估交替进行, 可以在 `configs/rec/rec_icdar15_t | rec_mv3_none_none_ctc.yml | Rosetta | Mobilenet_v3 large 0.5 | None | None | ctc | | rec_r34_vd_none_bilstm_ctc.yml | CRNN | Resnet34_vd | None | BiLSTM | ctc | | rec_r34_vd_none_none_ctc.yml | Rosetta | Resnet34_vd | None | None | ctc | +| rec_r50fpn_vd_none_srn.yml | SRN | Resnet50_fpn_vd | None | rnn | srn | 训练中文数据,推荐使用[rec_chinese_lite_train_v2.0.yml](../../configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml),如您希望尝试其他算法在中文数据集上的效果,请参考下列说明修改配置文件: diff --git a/doc/doc_en/algorithm_overview_en.md b/doc/doc_en/algorithm_overview_en.md index 68bfd529..5016223f 100755 --- a/doc/doc_en/algorithm_overview_en.md +++ b/doc/doc_en/algorithm_overview_en.md @@ -43,7 +43,7 @@ PaddleOCR open-source text recognition algorithms list: - [x] Rosetta([paper](https://arxiv.org/abs/1910.05085))[10] - [x] STAR-Net([paper](http://www.bmva.org/bmvc/2016/papers/paper043/index.html))[11] - [ ] RARE([paper](https://arxiv.org/abs/1603.03915v1))[12] coming soon -- [ ] SRN([paper](https://arxiv.org/abs/2003.12294))[5] coming soon +- [x] SRN([paper](https://arxiv.org/abs/2003.12294))[5] Refer to [DTRB](https://arxiv.org/abs/1904.01906), the training and 
evaluation result of these above text recognition (using MJSynth and SynthText for training, evaluate on IIIT, SVT, IC03, IC13, IC15, SVTP, CUTE) is as follow: @@ -55,5 +55,5 @@ Refer to [DTRB](https://arxiv.org/abs/1904.01906), the training and evaluation r |CRNN|MobileNetV3|79.97%|rec_mv3_none_bilstm_ctc|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_none_bilstm_ctc_v2.0_train.tar)| |StarNet|Resnet34_vd|84.44%|rec_r34_vd_tps_bilstm_ctc|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_ctc_v2.0_train.tar)| |StarNet|MobileNetV3|81.42%|rec_mv3_tps_bilstm_ctc|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_ctc_v2.0_train.tar)| - +|SRN|Resnet50_vd_fpn| 88.52% | rec_r50fpn_vd_none_srn |[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r50_vd_srn_train.tar)| Please refer to the document for training guide and use of PaddleOCR text recognition algorithms [Text recognition model training/evaluation/prediction](./recognition_en.md) diff --git a/doc/doc_en/inference_en.md b/doc/doc_en/inference_en.md index ccbb7184..c8ce1424 100755 --- a/doc/doc_en/inference_en.md +++ b/doc/doc_en/inference_en.md @@ -25,6 +25,7 @@ Next, we first introduce how to convert a trained model into an inference model, - [TEXT RECOGNITION MODEL INFERENCE](#RECOGNITION_MODEL_INFERENCE) - [1. LIGHTWEIGHT CHINESE MODEL](#LIGHTWEIGHT_RECOGNITION) - [2. CTC-BASED TEXT RECOGNITION MODEL INFERENCE](#CTC-BASED_RECOGNITION) + - [3. SRN-BASED TEXT RECOGNITION MODEL INFERENCE](#SRN-BASED_RECOGNITION) - [3. TEXT RECOGNITION MODEL INFERENCE USING CUSTOM CHARACTERS DICTIONARY](#USING_CUSTOM_CHARACTERS) - [4. MULTILINGUAL MODEL INFERENCE](MULTILINGUAL_MODEL_INFERENCE) @@ -304,8 +305,23 @@ self.character_str = "0123456789abcdefghijklmnopqrstuvwxyz" dict_character = list(self.character_str) ``` + +### 3. SRN-BASED TEXT RECOGNITION MODEL INFERENCE + +The recognition model based on SRN requires additional setting of the recognition algorithm parameter +--rec_algorithm="SRN". At the same time, it is necessary to ensure that the predicted shape is consistent +with the training, such as: --rec_image_shape="1, 64, 256" + +``` +python3 tools/infer/predict_rec.py --image_dir="./doc/imgs_words_en/word_336.png" \ + --rec_model_dir="./inference/srn/" \ + --rec_image_shape="1, 64, 256" \ + --rec_char_type="en" \ + --rec_algorithm="SRN" +``` + -### 3. TEXT RECOGNITION MODEL INFERENCE USING CUSTOM CHARACTERS DICTIONARY +### 4. TEXT RECOGNITION MODEL INFERENCE USING CUSTOM CHARACTERS DICTIONARY If the text dictionary is modified during training, when using the inference model to predict, you need to specify the dictionary path used by `--rec_char_dict_path`, and set `rec_char_type=ch` ``` @@ -313,7 +329,7 @@ python3 tools/infer/predict_rec.py --image_dir="./doc/imgs_words_en/word_336.png ``` -### 4. MULTILINGAUL MODEL INFERENCE +### 5. MULTILINGAUL MODEL INFERENCE If you need to predict other language models, when using inference model prediction, you need to specify the dictionary path used by `--rec_char_dict_path`. At the same time, in order to get the correct visualization results, You need to specify the visual font path through `--vis_font_path`. 
There are small language fonts provided by default under the `doc/fonts` path, such as Korean recognition: diff --git a/doc/doc_en/recognition_en.md b/doc/doc_en/recognition_en.md index 22f89cde..f29703d1 100644 --- a/doc/doc_en/recognition_en.md +++ b/doc/doc_en/recognition_en.md @@ -195,6 +195,7 @@ If the evaluation set is large, the test will be time-consuming. It is recommend | rec_mv3_none_none_ctc.yml | Rosetta | Mobilenet_v3 large 0.5 | None | None | ctc | | rec_r34_vd_none_bilstm_ctc.yml | CRNN | Resnet34_vd | None | BiLSTM | ctc | | rec_r34_vd_none_none_ctc.yml | Rosetta | Resnet34_vd | None | None | ctc | +| rec_r50fpn_vd_none_srn.yml | SRN | Resnet50_fpn_vd | None | rnn | srn | For training Chinese data, it is recommended to use [rec_chinese_lite_train_v2.0.yml](../../configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml). If you want to try the result of other algorithms on the Chinese data set, please refer to the following instructions to modify the configuration file: From 6781d55df4a705b1d0d7201e5fc6b484d4912a9b Mon Sep 17 00:00:00 2001 From: tink2123 Date: Fri, 29 Jan 2021 15:23:11 +0800 Subject: [PATCH 10/77] format doc --- doc/doc_ch/algorithm_overview.md | 1 + doc/doc_en/algorithm_overview_en.md | 1 + 2 files changed, 2 insertions(+) diff --git a/doc/doc_ch/algorithm_overview.md b/doc/doc_ch/algorithm_overview.md index f0765695..abbc5da4 100755 --- a/doc/doc_ch/algorithm_overview.md +++ b/doc/doc_ch/algorithm_overview.md @@ -54,4 +54,5 @@ PaddleOCR基于动态图开源的文本识别算法列表: |StarNet|Resnet34_vd|84.44%|rec_r34_vd_tps_bilstm_ctc|[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_ctc_v2.0_train.tar)| |StarNet|MobileNetV3|81.42%|rec_mv3_tps_bilstm_ctc|[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_ctc_v2.0_train.tar)| |SRN|Resnet50_vd_fpn| 88.52% | rec_r50fpn_vd_none_srn | [下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r50_vd_srn_train.tar) | + PaddleOCR文本识别算法的训练和使用请参考文档教程中[模型训练/评估中的文本识别部分](./recognition.md)。 diff --git a/doc/doc_en/algorithm_overview_en.md b/doc/doc_en/algorithm_overview_en.md index 5016223f..7d7896e7 100755 --- a/doc/doc_en/algorithm_overview_en.md +++ b/doc/doc_en/algorithm_overview_en.md @@ -56,4 +56,5 @@ Refer to [DTRB](https://arxiv.org/abs/1904.01906), the training and evaluation r |StarNet|Resnet34_vd|84.44%|rec_r34_vd_tps_bilstm_ctc|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_ctc_v2.0_train.tar)| |StarNet|MobileNetV3|81.42%|rec_mv3_tps_bilstm_ctc|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_ctc_v2.0_train.tar)| |SRN|Resnet50_vd_fpn| 88.52% | rec_r50fpn_vd_none_srn |[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r50_vd_srn_train.tar)| + Please refer to the document for training guide and use of PaddleOCR text recognition algorithms [Text recognition model training/evaluation/prediction](./recognition_en.md) From 2a0c3d4dac67cfd49e432303443bb9a50e75071f Mon Sep 17 00:00:00 2001 From: xiaoting <31891223+tink2123@users.noreply.github.com> Date: Sun, 31 Jan 2021 22:37:30 +0800 Subject: [PATCH 11/77] fix eval mode without srn (#1889) * fix base model * fix start time --- tools/program.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tools/program.py b/tools/program.py index 694d6415..f3ba4945 100755 --- a/tools/program.py +++ b/tools/program.py @@ -326,9 +326,12 @@ def eval(model, valid_dataloader, post_process_class, eval_class): if idx >= len(valid_dataloader): break 
images = batch[0] - others = batch[-4:] start = time.time() - preds = model(images, others) + if "SRN" in str(model.head): + others = batch[-4:] + preds = model(images, others) + else: + preds = model(images) batch = [item.numpy() for item in batch] # Obtain usable results from post-processing methods From f896032255566ffa3194e0d8f3843e7131a13ca7 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Mon, 1 Feb 2021 03:12:38 +0000 Subject: [PATCH 12/77] pre-commit --- tools/infer/predict_system.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/infer/predict_system.py b/tools/infer/predict_system.py index 8c4f9214..de7ee9d3 100755 --- a/tools/infer/predict_system.py +++ b/tools/infer/predict_system.py @@ -184,4 +184,4 @@ def main(args): if __name__ == "__main__": - main(utility.parse_args()) \ No newline at end of file + main(utility.parse_args()) From 7f2304ab3fbb2846b365f8293b4835a3120160df Mon Sep 17 00:00:00 2001 From: tink2123 Date: Mon, 1 Feb 2021 03:28:31 +0000 Subject: [PATCH 13/77] Adaptation of Chinese char --- ppocr/modeling/heads/rec_srn_head.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ppocr/modeling/heads/rec_srn_head.py b/ppocr/modeling/heads/rec_srn_head.py index 8aaf65e1..d2c7fc02 100644 --- a/ppocr/modeling/heads/rec_srn_head.py +++ b/ppocr/modeling/heads/rec_srn_head.py @@ -246,7 +246,7 @@ class SRNHead(nn.Layer): num_encoder_tus=self.num_encoder_TUs, num_decoder_tus=self.num_decoder_TUs, hidden_dims=self.hidden_dims) - self.vsfd = VSFD(in_channels=in_channels) + self.vsfd = VSFD(in_channels=in_channels, char_num=self.char_num) self.gsrm.wrap_encoder1.prepare_decoder.emb0 = self.gsrm.wrap_encoder0.prepare_decoder.emb0 From b544a561d55aafb63ef44593f327497b88935a91 Mon Sep 17 00:00:00 2001 From: WenmuZhou Date: Mon, 1 Feb 2021 12:49:11 +0800 Subject: [PATCH 14/77] update faq --- README_ch.md | 2 +- doc/doc_ch/FAQ.md | 57 +++++++++++++++++++++++++++++++++-------------- 2 files changed, 41 insertions(+), 18 deletions(-) diff --git a/README_ch.md b/README_ch.md index 2de6fdf5..030eb698 100755 --- a/README_ch.md +++ b/README_ch.md @@ -8,7 +8,7 @@ PaddleOCR同时支持动态图与静态图两种编程范式 - 静态图版本:develop分支 **近期更新** -- 2021.1.18 [FAQ](./doc/doc_ch/FAQ.md)新增5个高频问题,总数152个,每周一都会更新,欢迎大家持续关注。 +- 2021.2.1 [FAQ](./doc/doc_ch/FAQ.md)新增5个高频问题,总数157个,每周一都会更新,欢迎大家持续关注。 - 2020.12.15 更新数据合成工具[Style-Text](./StyleText/README_ch.md),可以批量合成大量与目标场景类似的图像,在多个场景验证,效果明显提升。 - 2020.11.25 更新半自动标注工具[PPOCRLabel](./PPOCRLabel/README_ch.md),辅助开发者高效完成标注任务,输出格式与PP-OCR训练任务完美衔接。 - 2020.9.22 更新PP-OCR技术文章,https://arxiv.org/abs/2009.09941 diff --git a/doc/doc_ch/FAQ.md b/doc/doc_ch/FAQ.md index 37b9834d..65b379fe 100755 --- a/doc/doc_ch/FAQ.md +++ b/doc/doc_ch/FAQ.md @@ -9,42 +9,41 @@ ## PaddleOCR常见问题汇总(持续更新) -* [近期更新(2021.1.18)](#近期更新) +* [近期更新(2021.2.1)](#近期更新) * [【精选】OCR精选10个问题](#OCR精选10个问题) * [【理论篇】OCR通用32个问题](#OCR通用问题) * [基础知识7题](#基础知识) * [数据集7题](#数据集2) * [模型训练调优18题](#模型训练调优2) -* [【实战篇】PaddleOCR实战110个问题](#PaddleOCR实战问题) +* [【实战篇】PaddleOCR实战115个问题](#PaddleOCR实战问题) * [使用咨询36题](#使用咨询) - * [数据集17题](#数据集3) - * [模型训练调优28题](#模型训练调优3) - * [预测部署29题](#预测部署3) + * [数据集18题](#数据集3) + * [模型训练调优30题](#模型训练调优3) + * [预测部署31题](#预测部署3) ## 近期更新(2021.1.18) +#### Q3.2.18: PaddleOCR动态图版本如何finetune? -#### Q2.3.18: 在PP-OCR系统中,文本检测的骨干网络为什么没有使用SE模块? 
+**A**:finetune需要将配置文件里的 Global.load_static_weights设置为false,如果没有此字段可以手动添加,然后将模型地址放到Global.pretrained_model字段下即可 -**A**:SE模块是MobileNetV3网络一个重要模块,目的是估计特征图每个特征通道重要性,给特征图每个特征分配权重,提高网络的表达能力。但是,对于文本检测,输入网络的分辨率比较大,一般是640\*640,利用SE模块估计特征图每个特征通道重要性比较困难,网络提升能力有限,但是该模块又比较耗时,因此在PP-OCR系统中,文本检测的骨干网络没有使用SE模块。实验也表明,当去掉SE模块,超轻量模型大小可以减小40%,文本检测效果基本不受影响。详细可以参考PP-OCR技术文章,https://arxiv.org/abs/2009.09941. +#### Q3.3.29: 微调v1.1预训练的模型,可以直接用文字垂直排列和上下颠倒的图片吗?还是必须要水平排列的? -#### Q3.3.27: PaddleOCR关于文本识别模型的训练,支持的数据增强方式有哪些? +**A**:1.1和2.0的模型一样,微调时,垂直排列的文字需要逆时针旋转90%后加入训练,上下颠倒的需要旋转为水平的。 -**A**:文本识别支持的数据增强方式有随机小幅度裁剪、图像平衡、添加白噪声、颜色漂移、图像反色和Text Image Augmentation(TIA)变换等。可以参考[代码](../../ppocr/data/imaug/rec_img_aug.py)中的warp函数。 +#### Q3.3.30: 模型训练过程中如何得到 best_accuracy 模型? -#### Q3.3.28: 关于dygraph分支中,文本识别模型训练,要使用数据增强应该如何设置? +**A**:配置文件里的eval_batch_step字段用来控制多少次iter进行一次eval,在eval完成后会自动生成 best_accuracy 模型,所以将eval_batch_step改小一点(例如,10))就能得到best_accuracy模型了。 -**A**:可以参考[配置文件](../../configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml)在Train['dataset']['transforms']添加RecAug字段,使数据增强生效。可以通过添加对aug_prob设置,表示每种数据增强采用的概率。aug_prob默认是0.4.由于tia数据增强特殊性,默认不采用,可以通过添加use_tia设置,使tia数据增强生效。详细设置可以参考[ISSUE 1744](https://github.com/PaddlePaddle/PaddleOCR/issues/1744)。 +#### Q3.4.30: 如何多进程运行paddleocr? -#### Q3.4.28: PP-OCR系统中,文本检测的结果有置信度吗? +**A**:实例化多个paddleocr服务,然后将服务注册到注册中心,之后通过注册中心统一调度即可。 -**A**:文本检测的结果有置信度,由于推理过程中没有使用,所以没有显示的返回到最终结果中。如果需要文本检测结果的置信度,可以在[文本检测DB的后处理代码](../../ppocr/postprocess/db_postprocess.py)的155行,添加scores信息。这样,在[检测预测代码](../../tools/infer/predict_det.py)的197行,就可以拿到文本检测的scores信息。 +#### Q3.4.31: 2.0训练出来的模型,能否在1.1版本上进行部署? -#### Q3.4.29: DB文本检测,特征提取网络金字塔构建的部分代码在哪儿? - -**A**:特征提取网络金字塔构建的部分:[代码位置](../../ppocr/modeling/necks/db_fpn.py)。ppocr/modeling文件夹里面是组网相关的代码,其中architectures是文本检测或者文本识别整体流程代码;backbones是骨干网络相关代码;necks是类似与FPN的颈函数代码;heads是提取文本检测或者文本识别预测结果相关的头函数;transforms是类似于TPS特征预处理模块。更多的信息可以参考[代码组织结构](./tree.md)。 +**A**:这个是不建议的,2.0训练出来的模型建议使用dygraph分支里提供的部署代码。 ## 【精选】OCR精选10个问题 @@ -415,7 +414,7 @@ python3 -m pip install paddlepaddle-gpu==2.0.0rc1 -i https://mirror.baidu.com/py - develop:基于Paddle静态图开发的分支,推荐使用paddle1.8 或者2.0版本,该分支具备完善的模型训练、预测、推理部署、量化裁剪等功能,领先于release/1.1分支。 - release/1.1:PaddleOCR 发布的第一个稳定版本,基于静态图开发,具备完善的训练、预测、推理部署、量化裁剪等功能。 - dygraph:基于Paddle动态图开发的分支,目前仍在开发中,未来将作为主要开发分支,运行要求使用Paddle2.0rc1版本,目前仍在开发中。 -- release/2.0-rc1-0:PaddleOCR发布的第二个稳定版本,基于动态图和paddle2.0版本开发,动态图开发的工程更易于调试,目前支,支持模型训练、预测,暂不支持移动端部署。 +- release/2.0:PaddleOCR发布的第二个稳定版本,基于动态图和paddle2.0rc1版本开发,动态图开发的工程更易于调试,目前支,支持模型训练、预测,暂不支持移动端部署。 如果您已经上手过PaddleOCR,并且希望在各种环境上部署PaddleOCR,目前建议使用静态图分支,develop或者release/1.1分支。如果您是初学者,想快速训练,调试PaddleOCR中的算法,建议尝鲜PaddleOCR dygraph分支。 @@ -568,6 +567,12 @@ StyleText的用途主要是:提取style_image中的字体、背景等style信 **A**:PPOCRLabel可运行于Linux、Windows、MacOS等多种系统。操作步骤可以参考文档,https://github.com/PaddlePaddle/PaddleOCR/blob/develop/PPOCRLabel/README.md + +#### Q3.2.18: PaddleOCR动态图版本如何finetune? + +**A**:finetune需要将配置文件里的 Global.load_static_weights设置为false,如果没有此字段可以手动添加,然后将模型地址放到Global.pretrained_model字段下即可 + + ### 模型训练调优 @@ -713,6 +718,15 @@ ps -axu | grep train.py | awk '{print $2}' | xargs kill -9 **A**:可以参考[配置文件](../../configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml)在Train['dataset']['transforms']添加RecAug字段,使数据增强生效。可以通过添加对aug_prob设置,表示每种数据增强采用的概率。aug_prob默认是0.4.由于tia数据增强特殊性,默认不采用,可以通过添加use_tia设置,使tia数据增强生效。详细设置可以参考[ISSUE 1744](https://github.com/PaddlePaddle/PaddleOCR/issues/1744)。 +#### Q3.3.29: 微调v1.1预训练的模型,可以直接用文字垂直排列和上下颠倒的图片吗?还是必须要水平排列的? 
+ +**A**:1.1和2.0的模型一样,微调时,垂直排列的文字需要逆时针旋转90%后加入训练,上下颠倒的需要旋转为水平的。 + +#### Q3.3.30: 模型训练过程中如何得到 best_accuracy 模型? + +**A**:配置文件里的eval_batch_step字段用来控制多少次iter进行一次eval,在eval完成后会自动生成 best_accuracy 模型,所以将eval_batch_step改小一点(例如,10))就能得到best_accuracy模型了。 + + ### 预测部署 @@ -854,3 +868,12 @@ img = cv.imdecode(img_array, -1) #### Q3.4.29: DB文本检测,特征提取网络金字塔构建的部分代码在哪儿? **A**:特征提取网络金字塔构建的部分:[代码位置](../../ppocr/modeling/necks/db_fpn.py)。ppocr/modeling文件夹里面是组网相关的代码,其中architectures是文本检测或者文本识别整体流程代码;backbones是骨干网络相关代码;necks是类似与FPN的颈函数代码;heads是提取文本检测或者文本识别预测结果相关的头函数;transforms是类似于TPS特征预处理模块。更多的信息可以参考[代码组织结构](./tree.md)。 + +#### Q3.4.30: 如何多进程运行paddleocr? + +**A**:实例化多个paddleocr服务,然后将服务注册到注册中心,之后通过注册中心统一调度即可。 + + +#### Q3.4.31: 2.0训练出来的模型,能否在1.1版本上进行部署? + +**A**:这个是不建议的,2.0训练出来的模型建议使用dygraph分支里提供的部署代码。 From 7a054c854b8253a2a088c107e55de20a3f207a26 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Mon, 1 Feb 2021 06:27:56 +0000 Subject: [PATCH 15/77] rare doc and opt post_process --- doc/doc_ch/algorithm_overview.md | 5 +++- doc/doc_ch/recognition.md | 2 ++ doc/doc_en/algorithm_overview_en.md | 4 ++- doc/doc_en/recognition_en.md | 3 +++ ppocr/postprocess/rec_postprocess.py | 37 +++++++++++++++++++++++++--- 5 files changed, 45 insertions(+), 6 deletions(-) diff --git a/doc/doc_ch/algorithm_overview.md b/doc/doc_ch/algorithm_overview.md index abbc5da4..4ff7482c 100755 --- a/doc/doc_ch/algorithm_overview.md +++ b/doc/doc_ch/algorithm_overview.md @@ -40,7 +40,7 @@ PaddleOCR基于动态图开源的文本识别算法列表: - [x] CRNN([paper](https://arxiv.org/abs/1507.05717))[7](ppocr推荐) - [x] Rosetta([paper](https://arxiv.org/abs/1910.05085))[10] - [x] STAR-Net([paper](http://www.bmva.org/bmvc/2016/papers/paper043/index.html))[11] -- [ ] RARE([paper](https://arxiv.org/abs/1603.03915v1))[12] coming soon +- [x] RARE([paper](https://arxiv.org/abs/1603.03915v1))[12] - [x] SRN([paper](https://arxiv.org/abs/2003.12294))[5] 参考[DTRB][3](https://arxiv.org/abs/1904.01906)文字识别训练和评估流程,使用MJSynth和SynthText两个文字识别数据集训练,在IIIT, SVT, IC03, IC13, IC15, SVTP, CUTE数据集上进行评估,算法效果如下: @@ -53,6 +53,9 @@ PaddleOCR基于动态图开源的文本识别算法列表: |CRNN|MobileNetV3|79.97%|rec_mv3_none_bilstm_ctc|[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_none_bilstm_ctc_v2.0_train.tar)| |StarNet|Resnet34_vd|84.44%|rec_r34_vd_tps_bilstm_ctc|[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_ctc_v2.0_train.tar)| |StarNet|MobileNetV3|81.42%|rec_mv3_tps_bilstm_ctc|[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_ctc_v2.0_train.tar)| +|RARE|MobileNetV3|82.5|rec_mv3_tps_bilstm_att||[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_att_v2.0_train.tar)| +|RARE|Resnet34_vd|83.6|rec_r34_vd_tps_bilstm_att||[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_att_v2.0_train.tar)| |SRN|Resnet50_vd_fpn| 88.52% | rec_r50fpn_vd_none_srn | [下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r50_vd_srn_train.tar) | + PaddleOCR文本识别算法的训练和使用请参考文档教程中[模型训练/评估中的文本识别部分](./recognition.md)。 diff --git a/doc/doc_ch/recognition.md b/doc/doc_ch/recognition.md index bc877ab7..f36e8019 100644 --- a/doc/doc_ch/recognition.md +++ b/doc/doc_ch/recognition.md @@ -201,6 +201,8 @@ PaddleOCR支持训练和评估交替进行, 可以在 `configs/rec/rec_icdar15_t | rec_mv3_none_none_ctc.yml | Rosetta | Mobilenet_v3 large 0.5 | None | None | ctc | | rec_r34_vd_none_bilstm_ctc.yml | CRNN | Resnet34_vd | None | BiLSTM | ctc | | rec_r34_vd_none_none_ctc.yml | Rosetta | Resnet34_vd | None | None | ctc | +| rec_mv3_tps_bilstm_att.yml | CRNN | Mobilenet_v3 | TPS | 
BiLSTM | att | +| rec_r34_vd_tps_bilstm_att.yml | CRNN | Resnet34_vd | TPS | BiLSTM | att | | rec_r50fpn_vd_none_srn.yml | SRN | Resnet50_fpn_vd | None | rnn | srn | 训练中文数据,推荐使用[rec_chinese_lite_train_v2.0.yml](../../configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml),如您希望尝试其他算法在中文数据集上的效果,请参考下列说明修改配置文件: diff --git a/doc/doc_en/algorithm_overview_en.md b/doc/doc_en/algorithm_overview_en.md index 7d7896e7..423fe807 100755 --- a/doc/doc_en/algorithm_overview_en.md +++ b/doc/doc_en/algorithm_overview_en.md @@ -42,7 +42,7 @@ PaddleOCR open-source text recognition algorithms list: - [x] CRNN([paper](https://arxiv.org/abs/1507.05717))[7] - [x] Rosetta([paper](https://arxiv.org/abs/1910.05085))[10] - [x] STAR-Net([paper](http://www.bmva.org/bmvc/2016/papers/paper043/index.html))[11] -- [ ] RARE([paper](https://arxiv.org/abs/1603.03915v1))[12] coming soon +- [x] RARE([paper](https://arxiv.org/abs/1603.03915v1))[12] - [x] SRN([paper](https://arxiv.org/abs/2003.12294))[5] Refer to [DTRB](https://arxiv.org/abs/1904.01906), the training and evaluation result of these above text recognition (using MJSynth and SynthText for training, evaluate on IIIT, SVT, IC03, IC13, IC15, SVTP, CUTE) is as follow: @@ -55,6 +55,8 @@ Refer to [DTRB](https://arxiv.org/abs/1904.01906), the training and evaluation r |CRNN|MobileNetV3|79.97%|rec_mv3_none_bilstm_ctc|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_none_bilstm_ctc_v2.0_train.tar)| |StarNet|Resnet34_vd|84.44%|rec_r34_vd_tps_bilstm_ctc|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_ctc_v2.0_train.tar)| |StarNet|MobileNetV3|81.42%|rec_mv3_tps_bilstm_ctc|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_ctc_v2.0_train.tar)| +|RARE|MobileNetV3|82.5|rec_mv3_tps_bilstm_att||[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_att_v2.0_train.tar)| +|RARE|Resnet34_vd|83.6|rec_r34_vd_tps_bilstm_att||[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_att_v2.0_train.tar)| |SRN|Resnet50_vd_fpn| 88.52% | rec_r50fpn_vd_none_srn |[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r50_vd_srn_train.tar)| Please refer to the document for training guide and use of PaddleOCR text recognition algorithms [Text recognition model training/evaluation/prediction](./recognition_en.md) diff --git a/doc/doc_en/recognition_en.md b/doc/doc_en/recognition_en.md index f29703d1..c2ff2022 100644 --- a/doc/doc_en/recognition_en.md +++ b/doc/doc_en/recognition_en.md @@ -195,8 +195,11 @@ If the evaluation set is large, the test will be time-consuming. It is recommend | rec_mv3_none_none_ctc.yml | Rosetta | Mobilenet_v3 large 0.5 | None | None | ctc | | rec_r34_vd_none_bilstm_ctc.yml | CRNN | Resnet34_vd | None | BiLSTM | ctc | | rec_r34_vd_none_none_ctc.yml | Rosetta | Resnet34_vd | None | None | ctc | +| rec_mv3_tps_bilstm_att.yml | CRNN | Mobilenet_v3 | TPS | BiLSTM | att | +| rec_r34_vd_tps_bilstm_att.yml | CRNN | Resnet34_vd | TPS | BiLSTM | att | | rec_r50fpn_vd_none_srn.yml | SRN | Resnet50_fpn_vd | None | rnn | srn | + For training Chinese data, it is recommended to use [rec_chinese_lite_train_v2.0.yml](../../configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml). 
If you want to try the result of other algorithms on the Chinese data set, please refer to the following instructions to modify the configuration file: co diff --git a/ppocr/postprocess/rec_postprocess.py b/ppocr/postprocess/rec_postprocess.py index 2b82750f..d7e658f3 100644 --- a/ppocr/postprocess/rec_postprocess.py +++ b/ppocr/postprocess/rec_postprocess.py @@ -143,6 +143,35 @@ class AttnLabelDecode(BaseRecLabelDecode): dict_character = [self.beg_str] + dict_character + [self.end_str] return dict_character + def decode(self, text_index, text_prob=None, is_remove_duplicate=False): + """ convert text-index into text-label. """ + result_list = [] + ignored_tokens = self.get_ignored_tokens() + [beg_idx, end_idx] = self.get_ignored_tokens() + batch_size = len(text_index) + for batch_idx in range(batch_size): + char_list = [] + conf_list = [] + for idx in range(len(text_index[batch_idx])): + if text_index[batch_idx][idx] in ignored_tokens: + continue + if int(text_index[batch_idx][idx]) == int(end_idx): + break + if is_remove_duplicate: + # only for predict + if idx > 0 and text_index[batch_idx][idx - 1] == text_index[ + batch_idx][idx]: + continue + char_list.append(self.character[int(text_index[batch_idx][ + idx])]) + if text_prob is not None: + conf_list.append(text_prob[batch_idx][idx]) + else: + conf_list.append(1) + text = ''.join(char_list) + result_list.append((text, np.mean(conf_list))) + return result_list + def __call__(self, preds, label=None, *args, **kwargs): """ text = self.decode(text) @@ -157,10 +186,10 @@ class AttnLabelDecode(BaseRecLabelDecode): preds_idx = preds.argmax(axis=2) preds_prob = preds.max(axis=2) - text = self.decode(preds_idx, preds_prob, is_remove_duplicate=True) + text = self.decode(preds_idx, preds_prob, is_remove_duplicate=False) if label is None: return text - label = self.decode(label, is_remove_duplicate=True) + label = self.decode(label, is_remove_duplicate=False) return text, label def encoder(self, labels, labels_length): @@ -226,12 +255,12 @@ class SRNLabelDecode(BaseRecLabelDecode): text = self.decode(preds_idx, preds_prob) if label is None: - text = self.decode(preds_idx, preds_prob, is_remove_duplicate=False) + text = self.decode(preds_idx, preds_prob, is_remove_duplicate=True) return text label = self.decode(label) return text, label - def decode(self, text_index, text_prob=None, is_remove_duplicate=False): + def decode(self, text_index, text_prob=None, is_remove_duplicate=True): """ convert text-index into text-label. 
""" result_list = [] ignored_tokens = self.get_ignored_tokens() From 0f4d92b63f811db56561f0f7223909dbe56d4fe3 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Mon, 1 Feb 2021 06:32:14 +0000 Subject: [PATCH 16/77] fix conflict wiith SRN --- ppocr/postprocess/rec_postprocess.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ppocr/postprocess/rec_postprocess.py b/ppocr/postprocess/rec_postprocess.py index d7e658f3..d4991222 100644 --- a/ppocr/postprocess/rec_postprocess.py +++ b/ppocr/postprocess/rec_postprocess.py @@ -255,12 +255,12 @@ class SRNLabelDecode(BaseRecLabelDecode): text = self.decode(preds_idx, preds_prob) if label is None: - text = self.decode(preds_idx, preds_prob, is_remove_duplicate=True) + text = self.decode(preds_idx, preds_prob, is_remove_duplicate=False) return text label = self.decode(label) return text, label - def decode(self, text_index, text_prob=None, is_remove_duplicate=True): + def decode(self, text_index, text_prob=None, is_remove_duplicate=False): """ convert text-index into text-label. """ result_list = [] ignored_tokens = self.get_ignored_tokens() From e7d24ac8b87a76d36c1f0e022d450db633e00017 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Mon, 1 Feb 2021 06:41:45 +0000 Subject: [PATCH 17/77] fix comment --- configs/rec/rec_r34_vd_tps_bilstm_att.yml | 2 -- ppocr/postprocess/rec_postprocess.py | 19 ------------------- 2 files changed, 21 deletions(-) diff --git a/configs/rec/rec_r34_vd_tps_bilstm_att.yml b/configs/rec/rec_r34_vd_tps_bilstm_att.yml index f42bfdcc..7be34b9c 100644 --- a/configs/rec/rec_r34_vd_tps_bilstm_att.yml +++ b/configs/rec/rec_r34_vd_tps_bilstm_att.yml @@ -43,7 +43,6 @@ Architecture: Backbone: name: ResNet layers: 34 - Neck: name: SequenceEncoder encoder_type: rnn @@ -52,7 +51,6 @@ Architecture: name: AttentionHead # AttentionHead hidden_size: 256 # l2_decay: 0.00001 - Loss: name: AttentionLoss diff --git a/ppocr/postprocess/rec_postprocess.py b/ppocr/postprocess/rec_postprocess.py index d4991222..af243caa 100644 --- a/ppocr/postprocess/rec_postprocess.py +++ b/ppocr/postprocess/rec_postprocess.py @@ -192,25 +192,6 @@ class AttnLabelDecode(BaseRecLabelDecode): label = self.decode(label, is_remove_duplicate=False) return text, label - def encoder(self, labels, labels_length): - """ - used to encoder labels readed from LMDB dataset, forexample: - [35, 25, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] encode to - 'you': [0, 35,25,31, 37, 0, ...] 
'sos'you'eos' - """ - if isinstance(labels, paddle.Tensor): - labels = labels.numpy() - batch_max_length = labels.shape[ - 1] + 2 # add start token 'sos' and end token 'eos' - new_labels = np.zeros( - [labels.shape[0], batch_max_length]).astype(np.int64) - for i in range(labels.shape[0]): - new_labels[i, 1:1 + labels_length[i]] = labels[i, :labels_length[ - i]] # new_labels[i, 0] = 'sos' token - new_labels[i, labels_length[i] + 1] = len( - self.character) - 1 # add end charactor 'eos' token - return new_labels - def get_ignored_tokens(self): beg_idx = self.get_beg_end_flag_idx("beg") end_idx = self.get_beg_end_flag_idx("end") From 550022ea663df53f62d199954c328493043ec1e0 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Mon, 1 Feb 2021 06:44:04 +0000 Subject: [PATCH 18/77] fix comment --- ppocr/data/imaug/label_ops.py | 2 +- ppocr/modeling/heads/rec_att_head.py | 15 --------------- 2 files changed, 1 insertion(+), 16 deletions(-) diff --git a/ppocr/data/imaug/label_ops.py b/ppocr/data/imaug/label_ops.py index 191bda92..26ac4d81 100644 --- a/ppocr/data/imaug/label_ops.py +++ b/ppocr/data/imaug/label_ops.py @@ -211,7 +211,7 @@ class AttnLabelEncode(BaseRecLabelEncode): text = self.encode(text) if text is None: return None - if len(text) > self.max_text_len: + if len(text) >= self.max_text_len: return None data['length'] = np.array(len(text)) text = [0] + text + [len(self.character) - 1] + [0] * (self.max_text_len diff --git a/ppocr/modeling/heads/rec_att_head.py b/ppocr/modeling/heads/rec_att_head.py index d01f0e6c..9f065d61 100644 --- a/ppocr/modeling/heads/rec_att_head.py +++ b/ppocr/modeling/heads/rec_att_head.py @@ -194,18 +194,3 @@ class AttentionLSTMCell(nn.Layer): cur_hidden = self.rnn(concat_context, prev_hidden) return cur_hidden, alpha - - -if __name__ == '__main__': - paddle.disable_static() - - model = Attention(100, 200, 10) - - x = np.random.uniform(-1, 1, [2, 10, 100]).astype(np.float32) - y = np.random.randint(0, 10, [2, 21]).astype(np.int32) - - xp = paddle.to_tensor(x) - yp = paddle.to_tensor(y) - - res = model(inputs=xp, targets=yp, is_train=True, batch_max_length=20) - print("res: ", res.shape) From fae6f1eef76d934e8d197fadd5de9ebda54b799f Mon Sep 17 00:00:00 2001 From: WenmuZhou Date: Mon, 1 Feb 2021 14:50:44 +0800 Subject: [PATCH 19/77] update --- doc/doc_ch/FAQ.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/doc_ch/FAQ.md b/doc/doc_ch/FAQ.md index 0f19036a..4724b0bc 100755 --- a/doc/doc_ch/FAQ.md +++ b/doc/doc_ch/FAQ.md @@ -33,7 +33,7 @@ **A**:1.1和2.0的模型一样,微调时,垂直排列的文字需要逆时针旋转90%后加入训练,上下颠倒的需要旋转为水平的。 #### Q3.3.30: 模型训练过程中如何得到 best_accuracy 模型? -**A**:配置文件里的eval_batch_step字段用来控制多少次iter进行一次eval,在eval完成后会自动生成 best_accuracy 模型,所以将eval_batch_step改小一点(例如,10))就能得到best_accuracy模型了。 +**A**:配置文件里的eval_batch_step字段用来控制多少次iter进行一次eval,在eval完成后会自动生成 best_accuracy 模型,所以如果希望很快就能拿到best_accuracy模型,可以将eval_batch_step改小一点(例如,10)。 #### Q3.4.33: 如何多进程运行paddleocr? **A**:实例化多个paddleocr服务,然后将服务注册到注册中心,之后通过注册中心统一调度即可。 @@ -724,7 +724,7 @@ ps -axu | grep train.py | awk '{print $2}' | xargs kill -9 **A**:1.1和2.0的模型一样,微调时,垂直排列的文字需要逆时针旋转90%后加入训练,上下颠倒的需要旋转为水平的。 #### Q3.3.30: 模型训练过程中如何得到 best_accuracy 模型? 
-**A**:配置文件里的eval_batch_step字段用来控制多少次iter进行一次eval,在eval完成后会自动生成 best_accuracy 模型,所以将eval_batch_step改小一点(例如,10))就能得到best_accuracy模型了。 +**A**:配置文件里的eval_batch_step字段用来控制多少次iter进行一次eval,在eval完成后会自动生成 best_accuracy 模型,所以如果希望很快就能拿到best_accuracy模型,可以将eval_batch_step改小一点(例如,10)。
From 0d89f3f91321811a77186f9a67ba10bb542a2efa Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Mon, 1 Feb 2021 06:54:56 +0000 Subject: [PATCH 20/77] fix comment --- ppocr/modeling/heads/rec_att_head.py | 1 - 1 file changed, 1 deletion(-)
diff --git a/ppocr/modeling/heads/rec_att_head.py b/ppocr/modeling/heads/rec_att_head.py index 9f065d61..bfe37e7a 100644 --- a/ppocr/modeling/heads/rec_att_head.py +++ b/ppocr/modeling/heads/rec_att_head.py @@ -20,7 +20,6 @@ import paddle import paddle.nn as nn import paddle.nn.functional as F import numpy as np -from paddle.jit import to_static class AttentionHead(nn.Layer):
From a92bb6d310e3fc423440dea93a99bd0ca1c285dd Mon Sep 17 00:00:00 2001 From: iamyoyo <30454163+iamyoyo@users.noreply.github.com> Date: Mon, 1 Feb 2021 15:09:14 +0800 Subject: [PATCH 21/77] Update program.py MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The variable on line 254 of PaddleOCR/tools/program.py is named cur_metirc and needs to be renamed to cur_metric; if it is not renamed, training fails at runtime with: "E:\XXX\PaddleOCR\tools\program.py", line 257, in train ['{}: {}'.format(k, v) for k, v in cur_metric.items()])) NameError: name 'cur_metric' is not defined --- tools/program.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/program.py b/tools/program.py index f3ba4945..2daf309a 100755 --- a/tools/program.py +++ b/tools/program.py @@ -251,7 +251,7 @@ def train(config, min_average_window=10000, max_average_window=15625) Model_Average.apply() - cur_metirc = eval(model, valid_dataloader, post_process_class, + cur_metric = eval(model, valid_dataloader, post_process_class, # 原cur_metirc修改为 cur_metric eval_class) cur_metric_str = 'cur metric, {}'.format(', '.join( ['{}: {}'.format(k, v) for k, v in cur_metric.items()]))
From be94977426d5bb7e912e1a64c7feeb58c783d419 Mon Sep 17 00:00:00 2001 From: iamyoyo <30454163+iamyoyo@users.noreply.github.com> Date: Mon, 1 Feb 2021 15:27:56 +0800 Subject: [PATCH 22/77] program.py line 257: variable name issue MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The variable on line 254 of PaddleOCR/tools/program.py is named cur_metirc and needs to be renamed to cur_metric; if it is not renamed, training fails at runtime with: "E:\XXX\PaddleOCR\tools\program.py", line 257, in train ['{}: {}'.format(k, v) for k, v in cur_metric.items()])) NameError: name 'cur_metric' is not defined --- tools/program.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/program.py b/tools/program.py index 2daf309a..a24d6ca7 100755 --- a/tools/program.py +++ b/tools/program.py @@ -251,7 +251,7 @@ def train(config, min_average_window=10000, max_average_window=15625) Model_Average.apply() - cur_metric = eval(model, valid_dataloader, post_process_class, # 原cur_metirc修改为 cur_metric + cur_metric = eval(model, valid_dataloader, post_process_class, eval_class) cur_metric_str = 'cur metric, {}'.format(', '.join( ['{}: {}'.format(k, v) for k, v in cur_metric.items()]))
From 23df5bcf1251a9f530acc7a7b96ef619b831df99 Mon Sep 17 00:00:00 2001 From: WenmuZhou Date: Mon, 1 Feb 2021 15:38:31 +0800 Subject: [PATCH 23/77] update dockerfile --- deploy/docker/hubserving/cpu/Dockerfile | 4 +--- deploy/docker/hubserving/gpu/Dockerfile | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-)
diff --git a/deploy/docker/hubserving/cpu/Dockerfile b/deploy/docker/hubserving/cpu/Dockerfile index 62a01bee..ef1a7b7b 100644 --- a/deploy/docker/hubserving/cpu/Dockerfile +++ b/deploy/docker/hubserving/cpu/Dockerfile @@ -1,11 +1,9 @@ # Version: 2.0.0 -FROM registry.baidubce.com/paddlepaddle/paddle:2.0.0rc1 +FROM registry.baidubce.com/paddlepaddle/paddle:2.0.0 # PaddleOCR base on Python3.7 RUN pip3.7 install --upgrade pip -i https://mirror.baidu.com/pypi/simple -RUN python3.7 -m pip install paddlepaddle==2.0.0 -i https://mirror.baidu.com/pypi/simple - RUN pip3.7 install paddlehub --upgrade -i https://mirror.baidu.com/pypi/simple RUN git clone https://github.com/PaddlePaddle/PaddleOCR.git /PaddleOCR
diff --git a/deploy/docker/hubserving/gpu/Dockerfile b/deploy/docker/hubserving/gpu/Dockerfile index 7a80bc56..b3f2d21f 100644 --- a/deploy/docker/hubserving/gpu/Dockerfile +++ b/deploy/docker/hubserving/gpu/Dockerfile @@ -1,11 +1,9 @@ # Version: 2.0.0 -FROM registry.baidubce.com/paddlepaddle/paddle:2.0.0rc1-gpu-cuda10.0-cudnn7 +FROM registry.baidubce.com/paddlepaddle/paddle:2.0.0-gpu-cuda10.1-cudnn7 # PaddleOCR base on Python3.7 RUN pip3.7 install --upgrade pip -i https://mirror.baidu.com/pypi/simple -RUN python3.7 -m pip install paddlepaddle-gpu==2.0.0 -i https://mirror.baidu.com/pypi/simple - RUN pip3.7 install paddlehub --upgrade -i https://mirror.baidu.com/pypi/simple RUN git clone https://github.com/PaddlePaddle/PaddleOCR.git /PaddleOCR
From a094d2775560a6dbb6e18cd761b99edd238956c2 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Mon, 1 Feb 2021 08:08:18 +0000 Subject: [PATCH 24/77] opt rec_att_head --- ppocr/modeling/heads/rec_att_head.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/ppocr/modeling/heads/rec_att_head.py b/ppocr/modeling/heads/rec_att_head.py index bfe37e7a..a7cfe128 100644 --- a/ppocr/modeling/heads/rec_att_head.py +++ b/ppocr/modeling/heads/rec_att_head.py @@ -64,8 +64,10 @@ class AttentionHead(nn.Layer): (outputs, hidden), alpha = self.attention_cell(hidden, inputs, char_onehots) probs_step = self.generator(outputs) - probs = paddle.unsqueeze( - probs_step, axis=1) if probs is None else paddle.concat( + if probs is None: + probs = paddle.unsqueeze(probs_step, axis=1) + else: + probs = paddle.concat( [probs, paddle.unsqueeze( probs_step, axis=1)], axis=1) next_input = probs_step.argmax(axis=1) @@ -152,8 +154,10 @@ class
AttentionLSTM(nn.Layer): char_onehots) probs_step = self.generator(hidden[0]) hidden = (hidden[1][0], hidden[1][1]) - probs = paddle.unsqueeze( - probs_step, axis=1) if probs is None else paddle.concat( + if probs is None: + probs = paddle.unsqueeze(probs_step, axis=1) + else: + probs = paddle.concat( [probs, paddle.unsqueeze( probs_step, axis=1)], axis=1) From 02b0bce42db9c17e641e205a186001d203d0172a Mon Sep 17 00:00:00 2001 From: WenmuZhou Date: Mon, 1 Feb 2021 16:16:15 +0800 Subject: [PATCH 25/77] update --- doc/doc_ch/FAQ.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/doc/doc_ch/FAQ.md b/doc/doc_ch/FAQ.md index 4724b0bc..a4a6faf3 100755 --- a/doc/doc_ch/FAQ.md +++ b/doc/doc_ch/FAQ.md @@ -36,7 +36,8 @@ **A**:配置文件里的eval_batch_step字段用来控制多少次iter进行一次eval,在eval完成后会自动生成 best_accuracy 模型,所以如果希望很快就能拿到best_accuracy模型,可以将eval_batch_step改小一点(例如,10)。 #### Q3.4.33: 如何多进程运行paddleocr? -**A**:实例化多个paddleocr服务,然后将服务注册到注册中心,之后通过注册中心统一调度即可。 +**A**:实例化多个paddleocr服务,然后将服务注册到注册中心,之后通过注册中心统一调度即可,关于注册中心,可以搜索eureka了解一下具体使用,其他的注册中心也行。 + #### Q3.4.34: 2.0训练出来的模型,能否在1.1版本上进行部署? **A**:这个是不建议的,2.0训练出来的模型建议使用dygraph分支里提供的部署代码。 @@ -884,7 +885,7 @@ Paddle2ONNX支持转换的[模型列表](https://github.com/PaddlePaddle/Paddle2 #### Q3.4.33: 如何多进程运行paddleocr? -**A**:实例化多个paddleocr服务,然后将服务注册到注册中心,之后通过注册中心统一调度即可。 +**A**:实例化多个paddleocr服务,然后将服务注册到注册中心,之后通过注册中心统一调度即可,关于注册中心,可以搜索eureka了解一下具体使用,其他的注册中心也行。 #### Q3.4.34: 2.0训练出来的模型,能否在1.1版本上进行部署? **A**:这个是不建议的,2.0训练出来的模型建议使用dygraph分支里提供的部署代码。 From dbef4a1d344515450ae885b55708286574f3eab5 Mon Sep 17 00:00:00 2001 From: WenmuZhou Date: Mon, 1 Feb 2021 16:20:43 +0800 Subject: [PATCH 26/77] update --- doc/doc_ch/FAQ.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/doc_ch/FAQ.md b/doc/doc_ch/FAQ.md index a4a6faf3..1b92c76c 100755 --- a/doc/doc_ch/FAQ.md +++ b/doc/doc_ch/FAQ.md @@ -30,7 +30,7 @@ #### Q3.3.29: 微调v1.1预训练的模型,可以直接用文字垂直排列和上下颠倒的图片吗?还是必须要水平排列的? -**A**:1.1和2.0的模型一样,微调时,垂直排列的文字需要逆时针旋转90%后加入训练,上下颠倒的需要旋转为水平的。 +**A**:1.1和2.0的模型一样,微调时,垂直排列的文字需要逆时针旋转 90° 后加入训练,上下颠倒的需要旋转为水平的。 #### Q3.3.30: 模型训练过程中如何得到 best_accuracy 模型? **A**:配置文件里的eval_batch_step字段用来控制多少次iter进行一次eval,在eval完成后会自动生成 best_accuracy 模型,所以如果希望很快就能拿到best_accuracy模型,可以将eval_batch_step改小一点(例如,10)。 @@ -722,7 +722,7 @@ ps -axu | grep train.py | awk '{print $2}' | xargs kill -9 **A**:可以参考[配置文件](../../configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml)在Train['dataset']['transforms']添加RecAug字段,使数据增强生效。可以通过添加对aug_prob设置,表示每种数据增强采用的概率。aug_prob默认是0.4.由于tia数据增强特殊性,默认不采用,可以通过添加use_tia设置,使tia数据增强生效。详细设置可以参考[ISSUE 1744](https://github.com/PaddlePaddle/PaddleOCR/issues/1744)。 #### Q3.3.29: 微调v1.1预训练的模型,可以直接用文字垂直排列和上下颠倒的图片吗?还是必须要水平排列的? -**A**:1.1和2.0的模型一样,微调时,垂直排列的文字需要逆时针旋转90%后加入训练,上下颠倒的需要旋转为水平的。 +**A**:1.1和2.0的模型一样,微调时,垂直排列的文字需要逆时针旋转 90°后加入训练,上下颠倒的需要旋转为水平的。 #### Q3.3.30: 模型训练过程中如何得到 best_accuracy 模型? **A**:配置文件里的eval_batch_step字段用来控制多少次iter进行一次eval,在eval完成后会自动生成 best_accuracy 模型,所以如果希望很快就能拿到best_accuracy模型,可以将eval_batch_step改小一点(例如,10)。 From 259f3bb0e039063d5fe29597cb121e24a349501c Mon Sep 17 00:00:00 2001 From: WenmuZhou Date: Mon, 1 Feb 2021 16:23:04 +0800 Subject: [PATCH 27/77] update --- doc/doc_ch/FAQ.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/doc_ch/FAQ.md b/doc/doc_ch/FAQ.md index 1b92c76c..2026e7a0 100755 --- a/doc/doc_ch/FAQ.md +++ b/doc/doc_ch/FAQ.md @@ -725,7 +725,7 @@ ps -axu | grep train.py | awk '{print $2}' | xargs kill -9 **A**:1.1和2.0的模型一样,微调时,垂直排列的文字需要逆时针旋转 90°后加入训练,上下颠倒的需要旋转为水平的。 #### Q3.3.30: 模型训练过程中如何得到 best_accuracy 模型? 
-**A**:配置文件里的eval_batch_step字段用来控制多少次iter进行一次eval,在eval完成后会自动生成 best_accuracy 模型,所以如果希望很快就能拿到best_accuracy模型,可以将eval_batch_step改小一点(例如,10)。 +**A**:配置文件里的eval_batch_step字段用来控制多少次iter进行一次eval,在eval完成后会自动生成 best_accuracy 模型,所以如果希望很快就能拿到best_accuracy模型,可以将eval_batch_step改小一点,如改为[10,10],这样表示第10次迭代后,以后没隔10个迭代就进行一次模型的评估。 From 873363589caae298fd01d54e7c6011730b79b49d Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Mon, 1 Feb 2021 09:02:14 +0000 Subject: [PATCH 28/77] fix link --- doc/doc_ch/algorithm_overview.md | 4 ++-- doc/doc_en/algorithm_overview_en.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/doc_ch/algorithm_overview.md b/doc/doc_ch/algorithm_overview.md index 4ff7482c..3c452095 100755 --- a/doc/doc_ch/algorithm_overview.md +++ b/doc/doc_ch/algorithm_overview.md @@ -53,8 +53,8 @@ PaddleOCR基于动态图开源的文本识别算法列表: |CRNN|MobileNetV3|79.97%|rec_mv3_none_bilstm_ctc|[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_none_bilstm_ctc_v2.0_train.tar)| |StarNet|Resnet34_vd|84.44%|rec_r34_vd_tps_bilstm_ctc|[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_ctc_v2.0_train.tar)| |StarNet|MobileNetV3|81.42%|rec_mv3_tps_bilstm_ctc|[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_ctc_v2.0_train.tar)| -|RARE|MobileNetV3|82.5|rec_mv3_tps_bilstm_att||[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_att_v2.0_train.tar)| -|RARE|Resnet34_vd|83.6|rec_r34_vd_tps_bilstm_att||[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_att_v2.0_train.tar)| +|RARE|MobileNetV3|82.5|rec_mv3_tps_bilstm_att |[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_att_v2.0_train.tar)| +|RARE|Resnet34_vd|83.6|rec_r34_vd_tps_bilstm_att |[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_att_v2.0_train.tar)| |SRN|Resnet50_vd_fpn| 88.52% | rec_r50fpn_vd_none_srn | [下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r50_vd_srn_train.tar) | diff --git a/doc/doc_en/algorithm_overview_en.md b/doc/doc_en/algorithm_overview_en.md index 423fe807..8f899a5a 100755 --- a/doc/doc_en/algorithm_overview_en.md +++ b/doc/doc_en/algorithm_overview_en.md @@ -55,8 +55,8 @@ Refer to [DTRB](https://arxiv.org/abs/1904.01906), the training and evaluation r |CRNN|MobileNetV3|79.97%|rec_mv3_none_bilstm_ctc|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_none_bilstm_ctc_v2.0_train.tar)| |StarNet|Resnet34_vd|84.44%|rec_r34_vd_tps_bilstm_ctc|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_ctc_v2.0_train.tar)| |StarNet|MobileNetV3|81.42%|rec_mv3_tps_bilstm_ctc|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_ctc_v2.0_train.tar)| -|RARE|MobileNetV3|82.5|rec_mv3_tps_bilstm_att||[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_att_v2.0_train.tar)| -|RARE|Resnet34_vd|83.6|rec_r34_vd_tps_bilstm_att||[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_att_v2.0_train.tar)| +|RARE|MobileNetV3|82.5|rec_mv3_tps_bilstm_att |[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_att_v2.0_train.tar)| +|RARE|Resnet34_vd|83.6|rec_r34_vd_tps_bilstm_att |[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_att_v2.0_train.tar)| |SRN|Resnet50_vd_fpn| 88.52% | rec_r50fpn_vd_none_srn |[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r50_vd_srn_train.tar)| Please refer to the 
document for training guide and use of PaddleOCR text recognition algorithms [Text recognition model training/evaluation/prediction](./recognition_en.md) From 8e2dc741b31dbcbcccb2413fa7faa6f2bd609f4a Mon Sep 17 00:00:00 2001 From: MissPenguin Date: Mon, 1 Feb 2021 17:05:35 +0800 Subject: [PATCH 29/77] Update algorithm_overview_en.md --- doc/doc_en/algorithm_overview_en.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/doc_en/algorithm_overview_en.md b/doc/doc_en/algorithm_overview_en.md index 8f899a5a..77b9642e 100755 --- a/doc/doc_en/algorithm_overview_en.md +++ b/doc/doc_en/algorithm_overview_en.md @@ -55,8 +55,8 @@ Refer to [DTRB](https://arxiv.org/abs/1904.01906), the training and evaluation r |CRNN|MobileNetV3|79.97%|rec_mv3_none_bilstm_ctc|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_none_bilstm_ctc_v2.0_train.tar)| |StarNet|Resnet34_vd|84.44%|rec_r34_vd_tps_bilstm_ctc|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_ctc_v2.0_train.tar)| |StarNet|MobileNetV3|81.42%|rec_mv3_tps_bilstm_ctc|[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_ctc_v2.0_train.tar)| -|RARE|MobileNetV3|82.5|rec_mv3_tps_bilstm_att |[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_att_v2.0_train.tar)| -|RARE|Resnet34_vd|83.6|rec_r34_vd_tps_bilstm_att |[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_att_v2.0_train.tar)| +|RARE|MobileNetV3|82.5%|rec_mv3_tps_bilstm_att |[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_att_v2.0_train.tar)| +|RARE|Resnet34_vd|83.6%|rec_r34_vd_tps_bilstm_att |[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_att_v2.0_train.tar)| |SRN|Resnet50_vd_fpn| 88.52% | rec_r50fpn_vd_none_srn |[Download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r50_vd_srn_train.tar)| Please refer to the document for training guide and use of PaddleOCR text recognition algorithms [Text recognition model training/evaluation/prediction](./recognition_en.md) From fe775780e4bcd87bf9a97fcf5fb646193ee70c12 Mon Sep 17 00:00:00 2001 From: MissPenguin Date: Mon, 1 Feb 2021 17:06:23 +0800 Subject: [PATCH 30/77] Update algorithm_overview.md --- doc/doc_ch/algorithm_overview.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/doc_ch/algorithm_overview.md b/doc/doc_ch/algorithm_overview.md index 3c452095..c8fc280d 100755 --- a/doc/doc_ch/algorithm_overview.md +++ b/doc/doc_ch/algorithm_overview.md @@ -53,8 +53,8 @@ PaddleOCR基于动态图开源的文本识别算法列表: |CRNN|MobileNetV3|79.97%|rec_mv3_none_bilstm_ctc|[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_none_bilstm_ctc_v2.0_train.tar)| |StarNet|Resnet34_vd|84.44%|rec_r34_vd_tps_bilstm_ctc|[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_ctc_v2.0_train.tar)| |StarNet|MobileNetV3|81.42%|rec_mv3_tps_bilstm_ctc|[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_ctc_v2.0_train.tar)| -|RARE|MobileNetV3|82.5|rec_mv3_tps_bilstm_att |[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_att_v2.0_train.tar)| -|RARE|Resnet34_vd|83.6|rec_r34_vd_tps_bilstm_att |[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_att_v2.0_train.tar)| +|RARE|MobileNetV3|82.5%|rec_mv3_tps_bilstm_att |[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_att_v2.0_train.tar)| 
+|RARE|Resnet34_vd|83.6%|rec_r34_vd_tps_bilstm_att |[下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_att_v2.0_train.tar)| |SRN|Resnet50_vd_fpn| 88.52% | rec_r50fpn_vd_none_srn | [下载链接](https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r50_vd_srn_train.tar) | From 077fcabcbce41015966c683d7eb2636a00dbc2e1 Mon Sep 17 00:00:00 2001 From: littletomatodonkey Date: Mon, 1 Feb 2021 14:03:32 +0000 Subject: [PATCH 31/77] fix doc --- doc/doc_ch/recognition.md | 2 +- doc/doc_en/recognition_en.md | 2 +- tools/program.py | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/doc_ch/recognition.md b/doc/doc_ch/recognition.md index f36e8019..91d64907 100644 --- a/doc/doc_ch/recognition.md +++ b/doc/doc_ch/recognition.md @@ -133,7 +133,7 @@ word_dict.txt 每行有一个单字,将字符与数字索引映射在一起, 您可以按需使用。 目前的多语言模型仍处在demo阶段,会持续优化模型并补充语种,**非常欢迎您为我们提供其他语言的字典和字体**, -如您愿意可将字典文件提交至 [dict](../../ppocr/utils/dict) 将语料文件提交至[corpus](../../ppocr/utils/corpus),我们会在Repo中感谢您。 +如您愿意可将字典文件提交至 [dict](../../ppocr/utils/dict),我们会在Repo中感谢您。 - 自定义字典 diff --git a/doc/doc_en/recognition_en.md b/doc/doc_en/recognition_en.md index c2ff2022..14ddcc75 100644 --- a/doc/doc_en/recognition_en.md +++ b/doc/doc_en/recognition_en.md @@ -126,7 +126,7 @@ In `word_dict.txt`, there is a single word in each line, which maps characters a You can use it on demand. The current multi-language model is still in the demo stage and will continue to optimize the model and add languages. **You are very welcome to provide us with dictionaries and fonts in other languages**, -If you like, you can submit the dictionary file to [dict](../../ppocr/utils/dict) or corpus file to [corpus](../../ppocr/utils/corpus) and we will thank you in the Repo. +If you like, you can submit the dictionary file to [dict](../../ppocr/utils/dict) and we will thank you in the Repo. To customize the dict file, please modify the `character_dict_path` field in `configs/rec/rec_icdar15_train.yml` and set `character_type` to `ch`. 
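The dictionary customization note above can be made concrete with a minimal sketch of the `Global` section it refers to; the dictionary path and the neighbouring keys below are illustrative assumptions, not values taken from this patch:

```yaml
Global:
  # train recognition on a custom character set (hypothetical dictionary file)
  character_dict_path: ppocr/utils/dict/my_custom_dict.txt
  character_type: ch
  use_space_char: False
```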
diff --git a/tools/program.py b/tools/program.py index a24d6ca7..99a37432 100755 --- a/tools/program.py +++ b/tools/program.py @@ -222,8 +222,8 @@ def train(config, batch = [item.numpy() for item in batch] post_result = post_process_class(preds, batch[1]) eval_class(post_result, batch) - metirc = eval_class.get_metric() - train_stats.update(metirc) + metric = eval_class.get_metric() + train_stats.update(metric) if vdl_writer is not None and dist.get_rank() == 0: for k, v in train_stats.get().items(): @@ -251,7 +251,7 @@ def train(config, min_average_window=10000, max_average_window=15625) Model_Average.apply() - cur_metric = eval(model, valid_dataloader, post_process_class, + cur_metric = eval(model, valid_dataloader, post_process_class, eval_class) cur_metric_str = 'cur metric, {}'.format(', '.join( ['{}: {}'.format(k, v) for k, v in cur_metric.items()])) From dc0a21ffc7462f1cbc488c505ac0a09a8ff445b5 Mon Sep 17 00:00:00 2001 From: WenmuZhou Date: Tue, 2 Feb 2021 15:45:57 +0800 Subject: [PATCH 32/77] fix divide zero error --- ppocr/metrics/rec_metric.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ppocr/metrics/rec_metric.py b/ppocr/metrics/rec_metric.py index a86fc838..cfd0805e 100644 --- a/ppocr/metrics/rec_metric.py +++ b/ppocr/metrics/rec_metric.py @@ -29,7 +29,7 @@ class RecMetric(object): pred = pred.replace(" ", "") target = target.replace(" ", "") norm_edit_dis += Levenshtein.distance(pred, target) / max( - len(pred), len(target)) + len(pred), len(target), 1) if pred == target: correct_num += 1 all_num += 1 From 2cda3b614a5e059fae3053f5f616b52fa9fee421 Mon Sep 17 00:00:00 2001 From: tink2123 Date: Tue, 2 Feb 2021 19:28:29 +0800 Subject: [PATCH 33/77] fix typo for attention --- configs/rec/rec_mv3_tps_bilstm_att.yml | 6 +++--- configs/rec/rec_mv3_tps_bilstm_ctc.yml | 2 +- configs/rec/rec_r34_vd_tps_bilstm_att.yml | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/configs/rec/rec_mv3_tps_bilstm_att.yml b/configs/rec/rec_mv3_tps_bilstm_att.yml index c64b2ccc..0ce06734 100644 --- a/configs/rec/rec_mv3_tps_bilstm_att.yml +++ b/configs/rec/rec_mv3_tps_bilstm_att.yml @@ -1,5 +1,5 @@ Global: - use_gpu: true + use_gpu: True epoch_num: 72 log_smooth_window: 20 print_batch_step: 10 @@ -65,7 +65,7 @@ Metric: Train: dataset: - name: LMDBDateSet + name: LMDBDataSet data_dir: ../training/ transforms: - DecodeImage: # load image @@ -84,7 +84,7 @@ Train: Eval: dataset: - name: LMDBDateSet + name: LMDBDataSet data_dir: ../validation/ transforms: - DecodeImage: # load image diff --git a/configs/rec/rec_mv3_tps_bilstm_ctc.yml b/configs/rec/rec_mv3_tps_bilstm_ctc.yml index 1b9fb0a0..4e867099 100644 --- a/configs/rec/rec_mv3_tps_bilstm_ctc.yml +++ b/configs/rec/rec_mv3_tps_bilstm_ctc.yml @@ -1,5 +1,5 @@ Global: - use_gpu: true + use_gpu: True epoch_num: 72 log_smooth_window: 20 print_batch_step: 10 diff --git a/configs/rec/rec_r34_vd_tps_bilstm_att.yml b/configs/rec/rec_r34_vd_tps_bilstm_att.yml index 7be34b9c..02aeb8c5 100644 --- a/configs/rec/rec_r34_vd_tps_bilstm_att.yml +++ b/configs/rec/rec_r34_vd_tps_bilstm_att.yml @@ -1,5 +1,5 @@ Global: - use_gpu: true + use_gpu: True epoch_num: 400 log_smooth_window: 20 print_batch_step: 10 @@ -64,7 +64,7 @@ Metric: Train: dataset: - name: LMDBDateSet + name: LMDBDataSet data_dir: ../training/ transforms: - DecodeImage: # load image @@ -83,7 +83,7 @@ Train: Eval: dataset: - name: LMDBDateSet + name: LMDBDataSet data_dir: ../validation/ transforms: - DecodeImage: # load image From 
adc83ca9ae086b9588d95243c95309a3ef6db7a4 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Tue, 2 Feb 2021 21:08:13 +0800 Subject: [PATCH 34/77] opt deploy doc --- deploy/cpp_infer/readme.md | 4 +++- deploy/cpp_infer/readme_en.md | 6 ++++-- doc/doc_ch/inference.md | 7 ++++--- doc/doc_ch/installation.md | 2 +- doc/doc_en/inference_en.md | 3 ++- doc/doc_en/installation_en.md | 2 +- tools/infer/utility.py | 1 + train.sh | 2 +- 8 files changed, 17 insertions(+), 10 deletions(-)
diff --git a/deploy/cpp_infer/readme.md b/deploy/cpp_infer/readme.md index b563ecf4..f81d9c75 100644 --- a/deploy/cpp_infer/readme.md +++ b/deploy/cpp_infer/readme.md @@ -1,6 +1,8 @@ # 服务器端C++预测 -本教程将介绍在服务器端部署PaddleOCR超轻量中文检测、识别模型的详细步骤。 +本章节介绍PaddleOCR模型的C++部署方法,与之对应的python预测部署方式参考[文档](../../doc/doc_ch/inference.md)。 +C++在性能计算上优于python,因此,在大多数CPU、GPU部署场景,多采用C++的部署方式,本节将介绍如何在Linux\Windows (CPU\GPU)环境下配置C++环境并完成 +PaddleOCR模型部署。 ## 1. 准备环境
diff --git a/deploy/cpp_infer/readme_en.md b/deploy/cpp_infer/readme_en.md index 41c764bc..8a0bd62e 100644 --- a/deploy/cpp_infer/readme_en.md +++ b/deploy/cpp_infer/readme_en.md @@ -1,7 +1,9 @@ # Server-side C++ inference - -In this tutorial, we will introduce the detailed steps of deploying PaddleOCR ultra-lightweight Chinese detection and recognition models on the server side. +This chapter introduces the C++ deployment of PaddleOCR models; for the corresponding Python inference deployment, please refer to this [document](../../doc/doc_ch/inference.md). +C++ usually outperforms Python in compute performance, so C++ deployment is preferred in most CPU and GPU deployment scenarios. +This section describes how to set up the C++ environment and deploy +PaddleOCR models in Linux and Windows (CPU/GPU) environments. ## 1. Prepare the environment
diff --git a/doc/doc_ch/inference.md b/doc/doc_ch/inference.md index 0daddd9b..822fdf1c 100755 --- a/doc/doc_ch/inference.md +++ b/doc/doc_ch/inference.md @@ -2,10 +2,11 @@ # 基于Python预测引擎推理 inference 模型(`paddle.jit.save`保存的模型) -一般是模型训练完成后保存的固化模型,多用于预测部署。训练过程中保存的模型是checkpoints模型,保存的是模型的参数,多用于恢复训练等。 -与checkpoints模型相比,inference 模型会额外保存模型的结构信息,在预测部署、加速推理上性能优越,灵活方便,适合与实际系统集成。 +一般是模型训练,把模型结构和模型参数保存在文件中的固化模型,多用于预测部署场景。 +训练过程中保存的模型是checkpoints模型,保存的只有模型的参数,多用于恢复训练等。 +与checkpoints模型相比,inference 模型会额外保存模型的结构信息,在预测部署、加速推理上性能优越,灵活方便,适合于实际系统集成。 -接下来首先介绍如何将训练的模型转换成inference模型,然后将依次介绍文本检测、文本角度分类器、文本识别以及三者串联基于预测引擎推理。 +接下来首先介绍如何将训练的模型转换成inference模型,然后将依次介绍文本检测、文本角度分类器、文本识别以及三者串联在CPU、GPU上的预测方法。 - [一、训练模型转inference模型](#训练模型转inference模型)
diff --git a/doc/doc_ch/installation.md b/doc/doc_ch/installation.md index fce151eb..95a7a52c 100644 --- a/doc/doc_ch/installation.md +++ b/doc/doc_ch/installation.md @@ -30,7 +30,7 @@ sudo nvidia-docker run --name ppocr -v $PWD:/paddle --shm-size=64G --network=hos sudo docker container exec -it ppocr /bin/bash ``` -**2. 安装PaddlePaddle Fluid v2.0** +**2. 安装PaddlePaddle v2.0** ``` pip3 install --upgrade pip
diff --git a/doc/doc_en/inference_en.md b/doc/doc_en/inference_en.md index c8ce1424..d42b00a3 100755 --- a/doc/doc_en/inference_en.md +++ b/doc/doc_en/inference_en.md @@ -5,7 +5,8 @@ The inference model (the model saved by `paddle.jit.save`) is generally a solidi The model saved during the training process is the checkpoints model, which saves the parameters of the model and is mostly used to resume training. -Compared with the checkpoints model, the inference model will additionally save the structural information of the model.
It has superior performance in predicting in deployment and accelerating inferencing, is flexible and convenient, and is suitable for integration with actual systems. For more details, please refer to the document [Classification Framework](https://github.com/PaddlePaddle/PaddleClas/blob/master/docs/zh_CN/extension/paddle_inference.md). +Compared with the checkpoints model, the inference model will additionally save the structural information of the model. Therefore, it is easier to deploy because the model structure and model parameters are already solidified in the inference model file, and is suitable for integration with actual systems. +For more details, please refer to the document [Classification Framework](https://github.com/PaddlePaddle/PaddleClas/blob/master/docs/zh_CN/extension/paddle_inference.md). Next, we first introduce how to convert a trained model into an inference model, and then we will introduce text detection, text recognition, angle class, and the concatenation of them based on inference model. diff --git a/doc/doc_en/installation_en.md b/doc/doc_en/installation_en.md index 35c1881d..ad29aa23 100644 --- a/doc/doc_en/installation_en.md +++ b/doc/doc_en/installation_en.md @@ -33,7 +33,7 @@ You can also visit [DockerHub](https://hub.docker.com/r/paddlepaddle/paddle/tags sudo docker container exec -it ppocr /bin/bash ``` -**2. Install PaddlePaddle Fluid v2.0** +**2. Install PaddlePaddle v2.0** ``` pip3 install --upgrade pip diff --git a/tools/infer/utility.py b/tools/infer/utility.py index 4171a29b..e842e7b0 100755 --- a/tools/infer/utility.py +++ b/tools/infer/utility.py @@ -123,6 +123,7 @@ def create_predictor(args, mode, logger): # cache 10 different shapes for mkldnn to avoid memory leak config.set_mkldnn_cache_capacity(10) config.enable_mkldnn() + #config.set_mkldnn_op({'conv2d', 'depthwise_conv2d', 'pool2d', 'batch_norm'}) args.rec_batch_num = 1 # config.enable_memory_optim() diff --git a/train.sh b/train.sh index 8fe861a3..4225470c 100644 --- a/train.sh +++ b/train.sh @@ -1,2 +1,2 @@ # recommended paddle.__version__ == 2.0.0 -python3 -m paddle.distributed.launch --gpus '0,1,2,3,4,5,6,7' tools/train.py -c configs/rec/rec_mv3_none_bilstm_ctc.yml +python3 -m paddle.distributed.launch --log_dir=./debug/ --gpus '0,1,2,3,4,5,6,7' tools/train.py -c configs/rec/rec_mv3_none_bilstm_ctc.yml From df1c97af800f52e5a57ed804dfe8f95e84477e3e Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Tue, 2 Feb 2021 21:27:48 +0800 Subject: [PATCH 35/77] fix bug --- tools/program.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/program.py b/tools/program.py index 99a37432..e91a70e0 100755 --- a/tools/program.py +++ b/tools/program.py @@ -381,6 +381,7 @@ def preprocess(is_train=False): logger = get_logger(name='root', log_file=log_file) if config['Global']['use_visualdl']: from visualdl import LogWriter + save_model_dir = config['Global']['save_model_dir'] vdl_writer_path = '{}/vdl/'.format(save_model_dir) os.makedirs(vdl_writer_path, exist_ok=True) vdl_writer = LogWriter(logdir=vdl_writer_path) From 4e98a521ec8e349452b832d90e70dcf93a4b31c2 Mon Sep 17 00:00:00 2001 From: littletomatodonkey Date: Tue, 2 Feb 2021 14:27:21 +0000 Subject: [PATCH 36/77] improve doc --- configs/cls/cls_mv3.yml | 3 +- .../det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml | 5 ++- .../ch_ppocr_v2.0/ch_det_res18_db_v2.0.yml | 5 ++- configs/det/det_mv3_db.yml | 5 ++- configs/det/det_mv3_east.yml | 5 ++- configs/det/det_r50_vd_db.yml | 5 ++- configs/det/det_r50_vd_east.yml | 5 ++- configs/det/det_r50_vd_sast_icdar15.yml | 
5 ++- configs/det/det_r50_vd_sast_totaltext.yml | 5 ++- .../rec_chinese_common_train_v2.0.yml | 1 - .../rec_chinese_lite_train_v2.0.yml | 1 - configs/rec/rec_icdar15_train.yml | 1 - configs/rec/rec_mv3_none_bilstm_ctc.yml | 1 - configs/rec/rec_mv3_none_none_ctc.yml | 1 - configs/rec/rec_mv3_tps_bilstm_att.yml | 1 - configs/rec/rec_mv3_tps_bilstm_ctc.yml | 1 - configs/rec/rec_r34_vd_none_bilstm_ctc.yml | 1 - configs/rec/rec_r34_vd_none_none_ctc.yml | 1 - configs/rec/rec_r34_vd_tps_bilstm_att.yml | 1 - configs/rec/rec_r34_vd_tps_bilstm_ctc.yml | 1 - configs/rec/rec_r50_fpn_srn.yml | 1 - deploy/cpp_infer/docs/windows_vs2019_build.md | 2 +- deploy/cpp_infer/readme.md | 32 ++++++++------- deploy/cpp_infer/readme_en.md | 39 +++++++++---------- doc/doc_ch/models_list.md | 9 ++++- doc/doc_en/models_list_en.md | 10 +++-- 26 files changed, 83 insertions(+), 64 deletions(-) diff --git a/configs/cls/cls_mv3.yml b/configs/cls/cls_mv3.yml index b165bc48..5e643dc3 100644 --- a/configs/cls/cls_mv3.yml +++ b/configs/cls/cls_mv3.yml @@ -7,7 +7,6 @@ Global: save_epoch_step: 3 # evaluation is run every 5000 iterations after the 4000th iteration eval_batch_step: [0, 1000] - # if pretrained_model is saved in static mode, load_static_weights must set to True cal_metric_during_train: True pretrained_model: checkpoints: @@ -93,4 +92,4 @@ Eval: shuffle: False drop_last: False batch_size_per_card: 512 - num_workers: 4 \ No newline at end of file + num_workers: 4 diff --git a/configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml b/configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml index fd884959..e001c376 100644 --- a/configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml +++ b/configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml @@ -7,7 +7,10 @@ Global: save_epoch_step: 1200 # evaluation is run every 5000 iterations after the 4000th iteration eval_batch_step: [3000, 2000] - # if pretrained_model is saved in static mode, load_static_weights must set to True + # 1. If pretrained_model is saved in static mode, such as classification pretrained model + # from static branch, load_static_weights must be set as True. + # 2. If you want to finetune the pretrained models we provide in the docs, + # you should set load_static_weights as False. load_static_weights: True cal_metric_during_train: False pretrained_model: ./pretrain_models/MobileNetV3_large_x0_5_pretrained diff --git a/configs/det/ch_ppocr_v2.0/ch_det_res18_db_v2.0.yml b/configs/det/ch_ppocr_v2.0/ch_det_res18_db_v2.0.yml index 26946012..4229248d 100644 --- a/configs/det/ch_ppocr_v2.0/ch_det_res18_db_v2.0.yml +++ b/configs/det/ch_ppocr_v2.0/ch_det_res18_db_v2.0.yml @@ -7,7 +7,10 @@ Global: save_epoch_step: 1200 # evaluation is run every 5000 iterations after the 4000th iteration eval_batch_step: [3000, 2000] - # if pretrained_model is saved in static mode, load_static_weights must set to True + # 1. If pretrained_model is saved in static mode, such as classification pretrained model + # from static branch, load_static_weights must be set as True. + # 2. If you want to finetune the pretrained models we provide in the docs, + # you should set load_static_weights as False. 
load_static_weights: True cal_metric_during_train: False pretrained_model: ./pretrain_models/ResNet18_vd_pretrained diff --git a/configs/det/det_mv3_db.yml b/configs/det/det_mv3_db.yml index 00a16b5a..f8aab705 100644 --- a/configs/det/det_mv3_db.yml +++ b/configs/det/det_mv3_db.yml @@ -7,7 +7,10 @@ Global: save_epoch_step: 1200 # evaluation is run every 2000 iterations eval_batch_step: [0, 2000] - # if pretrained_model is saved in static mode, load_static_weights must set to True + # 1. If pretrained_model is saved in static mode, such as classification pretrained model + # from static branch, load_static_weights must be set as True. + # 2. If you want to finetune the pretrained models we provide in the docs, + # you should set load_static_weights as False. load_static_weights: True cal_metric_during_train: False pretrained_model: ./pretrain_models/MobileNetV3_large_x0_5_pretrained diff --git a/configs/det/det_mv3_east.yml b/configs/det/det_mv3_east.yml index 05581a76..187ac160 100644 --- a/configs/det/det_mv3_east.yml +++ b/configs/det/det_mv3_east.yml @@ -7,7 +7,10 @@ Global: save_epoch_step: 1000 # evaluation is run every 5000 iterations after the 4000th iteration eval_batch_step: [4000, 5000] - # if pretrained_model is saved in static mode, load_static_weights must set to True + # 1. If pretrained_model is saved in static mode, such as classification pretrained model + # from static branch, load_static_weights must be set as True. + # 2. If you want to finetune the pretrained models we provide in the docs, + # you should set load_static_weights as False. load_static_weights: True cal_metric_during_train: False pretrained_model: ./pretrain_models/MobileNetV3_large_x0_5_pretrained diff --git a/configs/det/det_r50_vd_db.yml b/configs/det/det_r50_vd_db.yml index 19c059d6..3fa8948d 100644 --- a/configs/det/det_r50_vd_db.yml +++ b/configs/det/det_r50_vd_db.yml @@ -7,7 +7,10 @@ Global: save_epoch_step: 1200 # evaluation is run every 2000 iterations eval_batch_step: [0,2000] - # if pretrained_model is saved in static mode, load_static_weights must set to True + # 1. If pretrained_model is saved in static mode, such as classification pretrained model + # from static branch, load_static_weights must be set as True. + # 2. If you want to finetune the pretrained models we provide in the docs, + # you should set load_static_weights as False. load_static_weights: True cal_metric_during_train: False pretrained_model: ./pretrain_models/ResNet50_vd_ssld_pretrained diff --git a/configs/det/det_r50_vd_east.yml b/configs/det/det_r50_vd_east.yml index b8fe55d4..abef0b61 100644 --- a/configs/det/det_r50_vd_east.yml +++ b/configs/det/det_r50_vd_east.yml @@ -7,7 +7,10 @@ Global: save_epoch_step: 1000 # evaluation is run every 5000 iterations after the 4000th iteration eval_batch_step: [4000, 5000] - # if pretrained_model is saved in static mode, load_static_weights must set to True + # 1. If pretrained_model is saved in static mode, such as classification pretrained model + # from static branch, load_static_weights must be set as True. + # 2. If you want to finetune the pretrained models we provide in the docs, + # you should set load_static_weights as False. 
load_static_weights: True cal_metric_during_train: False pretrained_model: ./pretrain_models/ResNet50_vd_pretrained/ diff --git a/configs/det/det_r50_vd_sast_icdar15.yml b/configs/det/det_r50_vd_sast_icdar15.yml index a989bc8f..c24cae90 100755 --- a/configs/det/det_r50_vd_sast_icdar15.yml +++ b/configs/det/det_r50_vd_sast_icdar15.yml @@ -7,7 +7,10 @@ Global: save_epoch_step: 1000 # evaluation is run every 5000 iterations after the 4000th iteration eval_batch_step: [4000, 5000] - # if pretrained_model is saved in static mode, load_static_weights must set to True + # 1. If pretrained_model is saved in static mode, such as classification pretrained model + # from static branch, load_static_weights must be set as True. + # 2. If you want to finetune the pretrained models we provide in the docs, + # you should set load_static_weights as False. load_static_weights: True cal_metric_during_train: False pretrained_model: ./pretrain_models/ResNet50_vd_ssld_pretrained/ diff --git a/configs/det/det_r50_vd_sast_totaltext.yml b/configs/det/det_r50_vd_sast_totaltext.yml index e040c420..e6f467c6 100755 --- a/configs/det/det_r50_vd_sast_totaltext.yml +++ b/configs/det/det_r50_vd_sast_totaltext.yml @@ -7,7 +7,10 @@ Global: save_epoch_step: 1000 # evaluation is run every 5000 iterations after the 4000th iteration eval_batch_step: [4000, 5000] - # if pretrained_model is saved in static mode, load_static_weights must set to True + # 1. If pretrained_model is saved in static mode, such as classification pretrained model + # from static branch, load_static_weights must be set as True. + # 2. If you want to finetune the pretrained models we provide in the docs, + # you should set load_static_weights as False. load_static_weights: True cal_metric_during_train: False pretrained_model: ./pretrain_models/ResNet50_vd_ssld_pretrained/ diff --git a/configs/rec/ch_ppocr_v2.0/rec_chinese_common_train_v2.0.yml b/configs/rec/ch_ppocr_v2.0/rec_chinese_common_train_v2.0.yml index 1db3e1cb..6a524e22 100644 --- a/configs/rec/ch_ppocr_v2.0/rec_chinese_common_train_v2.0.yml +++ b/configs/rec/ch_ppocr_v2.0/rec_chinese_common_train_v2.0.yml @@ -7,7 +7,6 @@ Global: save_epoch_step: 3 # evaluation is run every 5000 iterations after the 4000th iteration eval_batch_step: [0, 2000] - # if pretrained_model is saved in static mode, load_static_weights must set to True cal_metric_during_train: True pretrained_model: checkpoints: diff --git a/configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml b/configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml index dc9d650f..c96621c5 100644 --- a/configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml +++ b/configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml @@ -7,7 +7,6 @@ Global: save_epoch_step: 3 # evaluation is run every 5000 iterations after the 4000th iteration eval_batch_step: [0, 2000] - # if pretrained_model is saved in static mode, load_static_weights must set to True cal_metric_during_train: True pretrained_model: checkpoints: diff --git a/configs/rec/rec_icdar15_train.yml b/configs/rec/rec_icdar15_train.yml index 8a743b50..5ae47c67 100644 --- a/configs/rec/rec_icdar15_train.yml +++ b/configs/rec/rec_icdar15_train.yml @@ -7,7 +7,6 @@ Global: save_epoch_step: 3 # evaluation is run every 2000 iterations eval_batch_step: [0, 2000] - # if pretrained_model is saved in static mode, load_static_weights must set to True cal_metric_during_train: True pretrained_model: checkpoints: diff --git a/configs/rec/rec_mv3_none_bilstm_ctc.yml b/configs/rec/rec_mv3_none_bilstm_ctc.yml index 
00c1db88..900e98b6 100644 --- a/configs/rec/rec_mv3_none_bilstm_ctc.yml +++ b/configs/rec/rec_mv3_none_bilstm_ctc.yml @@ -7,7 +7,6 @@ Global: save_epoch_step: 3 # evaluation is run every 2000 iterations eval_batch_step: [0, 2000] - # if pretrained_model is saved in static mode, load_static_weights must set to True cal_metric_during_train: True pretrained_model: checkpoints: diff --git a/configs/rec/rec_mv3_none_none_ctc.yml b/configs/rec/rec_mv3_none_none_ctc.yml index 6711b1d2..6d86b90c 100644 --- a/configs/rec/rec_mv3_none_none_ctc.yml +++ b/configs/rec/rec_mv3_none_none_ctc.yml @@ -7,7 +7,6 @@ Global: save_epoch_step: 3 # evaluation is run every 2000 iterations eval_batch_step: [0, 2000] - # if pretrained_model is saved in static mode, load_static_weights must set to True cal_metric_during_train: True pretrained_model: checkpoints: diff --git a/configs/rec/rec_mv3_tps_bilstm_att.yml b/configs/rec/rec_mv3_tps_bilstm_att.yml index 0ce06734..1b10410a 100644 --- a/configs/rec/rec_mv3_tps_bilstm_att.yml +++ b/configs/rec/rec_mv3_tps_bilstm_att.yml @@ -7,7 +7,6 @@ Global: save_epoch_step: 3 # evaluation is run every 5000 iterations after the 4000th iteration eval_batch_step: [0, 2000] - # if pretrained_model is saved in static mode, load_static_weights must set to True cal_metric_during_train: True pretrained_model: checkpoints: diff --git a/configs/rec/rec_mv3_tps_bilstm_ctc.yml b/configs/rec/rec_mv3_tps_bilstm_ctc.yml index 4e867099..026c6a9d 100644 --- a/configs/rec/rec_mv3_tps_bilstm_ctc.yml +++ b/configs/rec/rec_mv3_tps_bilstm_ctc.yml @@ -7,7 +7,6 @@ Global: save_epoch_step: 3 # evaluation is run every 2000 iterations eval_batch_step: [0, 2000] - # if pretrained_model is saved in static mode, load_static_weights must set to True cal_metric_during_train: True pretrained_model: checkpoints: diff --git a/configs/rec/rec_r34_vd_none_bilstm_ctc.yml b/configs/rec/rec_r34_vd_none_bilstm_ctc.yml index e4d301a6..4052d426 100644 --- a/configs/rec/rec_r34_vd_none_bilstm_ctc.yml +++ b/configs/rec/rec_r34_vd_none_bilstm_ctc.yml @@ -7,7 +7,6 @@ Global: save_epoch_step: 3 # evaluation is run every 2000 iterations eval_batch_step: [0, 2000] - # if pretrained_model is saved in static mode, load_static_weights must set to True cal_metric_during_train: True pretrained_model: checkpoints: diff --git a/configs/rec/rec_r34_vd_none_none_ctc.yml b/configs/rec/rec_r34_vd_none_none_ctc.yml index 4a17a004..c3e1d9a3 100644 --- a/configs/rec/rec_r34_vd_none_none_ctc.yml +++ b/configs/rec/rec_r34_vd_none_none_ctc.yml @@ -7,7 +7,6 @@ Global: save_epoch_step: 3 # evaluation is run every 2000 iterations eval_batch_step: [0, 2000] - # if pretrained_model is saved in static mode, load_static_weights must set to True cal_metric_during_train: True pretrained_model: checkpoints: diff --git a/configs/rec/rec_r34_vd_tps_bilstm_att.yml b/configs/rec/rec_r34_vd_tps_bilstm_att.yml index 02aeb8c5..e25eca95 100644 --- a/configs/rec/rec_r34_vd_tps_bilstm_att.yml +++ b/configs/rec/rec_r34_vd_tps_bilstm_att.yml @@ -7,7 +7,6 @@ Global: save_epoch_step: 3 # evaluation is run every 5000 iterations after the 4000th iteration eval_batch_step: [0, 2000] - # if pretrained_model is saved in static mode, load_static_weights must set to True cal_metric_during_train: True pretrained_model: checkpoints: diff --git a/configs/rec/rec_r34_vd_tps_bilstm_ctc.yml b/configs/rec/rec_r34_vd_tps_bilstm_ctc.yml index 62edf843..8b80b82d 100644 --- a/configs/rec/rec_r34_vd_tps_bilstm_ctc.yml +++ b/configs/rec/rec_r34_vd_tps_bilstm_ctc.yml @@ -7,7 +7,6 @@ 
Global: save_epoch_step: 3 # evaluation is run every 2000 iterations eval_batch_step: [0, 2000] - # if pretrained_model is saved in static mode, load_static_weights must set to True cal_metric_during_train: True pretrained_model: checkpoints: diff --git a/configs/rec/rec_r50_fpn_srn.yml b/configs/rec/rec_r50_fpn_srn.yml index ec7f1705..6285c95d 100644 --- a/configs/rec/rec_r50_fpn_srn.yml +++ b/configs/rec/rec_r50_fpn_srn.yml @@ -7,7 +7,6 @@ Global: save_epoch_step: 3 # evaluation is run every 5000 iterations after the 4000th iteration eval_batch_step: [0, 5000] - # if pretrained_model is saved in static mode, load_static_weights must set to True cal_metric_during_train: True pretrained_model: checkpoints: diff --git a/deploy/cpp_infer/docs/windows_vs2019_build.md b/deploy/cpp_infer/docs/windows_vs2019_build.md index 21fbf4e0..0f243bf8 100644 --- a/deploy/cpp_infer/docs/windows_vs2019_build.md +++ b/deploy/cpp_infer/docs/windows_vs2019_build.md @@ -14,7 +14,7 @@ PaddleOCR在Windows 平台下基于`Visual Studio 2019 Community` 进行了测 ### Step1: 下载PaddlePaddle C++ 预测库 fluid_inference -PaddlePaddle C++ 预测库针对不同的`CPU`和`CUDA`版本提供了不同的预编译版本,请根据实际情况下载: [C++预测库下载列表](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_guide/inference_deployment/inference/windows_cpp_inference.html) +PaddlePaddle C++ 预测库针对不同的`CPU`和`CUDA`版本提供了不同的预编译版本,请根据实际情况下载: [C++预测库下载列表](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/windows_cpp_inference.html) 解压后`D:\projects\fluid_inference`目录包含内容为: ``` diff --git a/deploy/cpp_infer/readme.md b/deploy/cpp_infer/readme.md index b563ecf4..41836915 100644 --- a/deploy/cpp_infer/readme.md +++ b/deploy/cpp_infer/readme.md @@ -72,9 +72,21 @@ opencv3/ * 有2种方式获取Paddle预测库,下面进行详细介绍。 -#### 1.2.1 预测库源码编译 +#### 1.2.1 直接下载安装 + +* [Paddle预测库官网](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/build_and_install_lib_cn.html)上提供了不同cuda版本的Linux预测库,可以在官网查看并选择合适的预测库版本。 + +* 下载之后使用下面的方法解压。 + +``` +tar -xf paddle_inference.tgz +``` + +最终会在当前的文件夹中生成`paddle_inference/`的子文件夹。 + +#### 1.2.2 预测库源码编译 * 如果希望获取最新预测库特性,可以从Paddle github上克隆最新代码,源码编译预测库。 -* 可以参考[Paddle预测库官网](https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.html)的说明,从github上获取Paddle代码,然后进行编译,生成最新的预测库。使用git获取代码方法如下。 +* 可以参考[Paddle预测库官网](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/build_and_install_lib_cn.html)的说明,从github上获取Paddle代码,然后进行编译,生成最新的预测库。使用git获取代码方法如下。 ```shell git clone https://github.com/PaddlePaddle/Paddle.git @@ -100,7 +112,7 @@ make -j make inference_lib_dist ``` -更多编译参数选项可以参考Paddle C++预测库官网:[https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.html](https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.html)。 +更多编译参数选项可以参考Paddle C++预测库官网:[https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/build_and_install_lib_cn.html](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/build_and_install_lib_cn.html)。 * 编译完成之后,可以在`build/paddle_inference_install_dir/`文件下看到生成了以下文件及文件夹。 @@ -115,17 +127,7 @@ build/paddle_inference_install_dir/ 其中`paddle`就是C++预测所需的Paddle库,`version.txt`中包含当前预测库的版本信息。 -#### 1.2.2 直接下载安装 -* 
[Paddle预测库官网](https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.html)上提供了不同cuda版本的Linux预测库,可以在官网查看并选择合适的预测库版本。 - -* 下载之后使用下面的方法解压。 - -``` -tar -xf paddle_inference.tgz -``` - -最终会在当前的文件夹中生成`paddle_inference/`的子文件夹。 ## 2 开始运行 @@ -223,7 +225,7 @@ char_list_file ../../ppocr/utils/ppocr_keys_v1.txt # 字典文件 visualize 1 # 是否对结果进行可视化,为1时,会在当前文件夹下保存文件名为`ocr_vis.png`的预测结果。 ``` -* PaddleOCR也支持多语言的预测,更多细节可以参考[识别文档](../../doc/doc_ch/recognition.md)中的多语言字典与模型部分。 +* PaddleOCR也支持多语言的预测,更多支持的语言和模型可以参考[识别文档](../../doc/doc_ch/recognition.md)中的多语言字典与模型部分,如果希望进行多语言预测,只需将修改`tools/config.txt`中的`char_list_file`(字典文件路径)以及`rec_model_dir`(inference模型路径)字段即可。 最终屏幕上会输出检测结果如下。 @@ -234,4 +236,4 @@ visualize 1 # 是否对结果进行可视化,为1时,会在当前文件夹 ### 2.3 注意 -* 在使用Paddle预测库时,推荐使用2.0.0-beta0版本的预测库。 +* 在使用Paddle预测库时,推荐使用2.0.0版本的预测库。 diff --git a/deploy/cpp_infer/readme_en.md b/deploy/cpp_infer/readme_en.md index 41c764bc..6bc49e94 100644 --- a/deploy/cpp_infer/readme_en.md +++ b/deploy/cpp_infer/readme_en.md @@ -74,10 +74,23 @@ opencv3/ * There are 2 ways to obtain the Paddle inference library, described in detail below. +#### 1.2.1 Direct download and installation -#### 1.2.1 Compile from the source code +* Different cuda versions of the Linux inference library (based on GCC 4.8.2) are provided on the +[Paddle inference library official website](https://www.paddlepaddle.org.cn/documentation/docs/en/develop/guides/05_inference_deployment/inference/build_and_install_lib_en.html). You can view and select the appropriate version of the inference library on the official website. + + +* After downloading, use the following method to uncompress. + +``` +tar -xf paddle_inference.tgz +``` + +Finally you can see the following files in the folder of `paddle_inference/`. + +#### 1.2.2 Compile from the source code * If you want to get the latest Paddle inference library features, you can download the latest code from Paddle github repository and compile the inference library from the source code. -* You can refer to [Paddle inference library] (https://www.paddlepaddle.org.cn/documentation/docs/en/advanced_guide/inference_deployment/inference/build_and_install_lib_en.html) to get the Paddle source code from github, and then compile To generate the latest inference library. The method of using git to access the code is as follows. +* You can refer to [Paddle inference library] (https://www.paddlepaddle.org.cn/documentation/docs/en/develop/guides/05_inference_deployment/inference/build_and_install_lib_en.html) to get the Paddle source code from github, and then compile To generate the latest inference library. The method of using git to access the code is as follows. ```shell @@ -104,7 +117,7 @@ make -j make inference_lib_dist ``` -For more compilation parameter options, please refer to the official website of the Paddle C++ inference library:[https://www.paddlepaddle.org.cn/documentation/docs/en/advanced_guide/inference_deployment/inference/build_and_install_lib_en.html](https://www.paddlepaddle.org.cn/documentation/docs/en/advanced_guide/inference_deployment/inference/build_and_install_lib_en.html). 
+For more compilation parameter options, please refer to the official website of the Paddle C++ inference library:[https://www.paddlepaddle.org.cn/documentation/docs/en/develop/guides/05_inference_deployment/inference/build_and_install_lib_en.html](https://www.paddlepaddle.org.cn/documentation/docs/en/develop/guides/05_inference_deployment/inference/build_and_install_lib_en.html). * After the compilation process, you can see the following files in the folder of `build/paddle_inference_install_dir/`. @@ -120,22 +133,6 @@ build/paddle_inference_install_dir/ Among them, `paddle` is the Paddle library required for C++ prediction later, and `version.txt` contains the version information of the current inference library. - -#### 1.2.2 Direct download and installation - -* Different cuda versions of the Linux inference library (based on GCC 4.8.2) are provided on the -[Paddle inference library official website](https://www.paddlepaddle.org.cn/documentation/docs/en/advanced_guide/inference_deployment/inference/build_and_install_lib_en.html). You can view and select the appropriate version of the inference library on the official website. - - -* After downloading, use the following method to uncompress. - -``` -tar -xf paddle_inference.tgz -``` - -Finally you can see the following files in the folder of `paddle_inference/`. - - ## 2. Compile and run the demo ### 2.1 Export the inference model @@ -233,7 +230,7 @@ char_list_file ../../ppocr/utils/ppocr_keys_v1.txt # dictionary file visualize 1 # Whether to visualize the results,when it is set as 1, The prediction result will be save in the image file `./ocr_vis.png`. ``` -* Multi-language inference is also supported in PaddleOCR, for more details, please refer to part of multi-language dictionaries and models in [recognition tutorial](../../doc/doc_en/recognition_en.md). +* Multi-language inference is also supported in PaddleOCR, you can refer to [recognition tutorial](../../doc/doc_en/recognition_en.md) for more supported languages and models in PaddleOCR. Specifically, if you want to infer using multi-language models, you just need to modify values of `char_list_file` and `rec_model_dir` in file `tools/config.txt`. The detection results will be shown on the screen, which is as follows. @@ -245,4 +242,4 @@ The detection results will be shown on the screen, which is as follows. ### 2.3 Notes -* Paddle2.0.0-beta0 inference model library is recommended for this toturial. +* Paddle2.0.0 inference model library is recommended for this toturial. 
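As a concrete illustration of the multi-language note in the hunk above, switching the C++ demo to another language only means editing the two entries of `tools/config.txt` mentioned there; the model directory and dictionary file below are assumptions chosen for the example, not files referenced by this patch:

```
# tools/config.txt (excerpt) - hypothetical French recognition setup
rec_model_dir  ./inference/french_rec_infer/
char_list_file ../../ppocr/utils/dict/french_dict.txt
```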
diff --git a/doc/doc_ch/models_list.md b/doc/doc_ch/models_list.md index fbfb3838..efb75f86 100644 --- a/doc/doc_ch/models_list.md +++ b/doc/doc_ch/models_list.md @@ -12,9 +12,14 @@ PaddleOCR提供的可下载模型包括`推理模型`、`训练模型`、`预训 |模型类型|模型格式|简介| |--- | --- | --- | -|推理模型|inference.pdmodel、inference.pdiparams|用于python预测引擎推理,[详情](./inference.md)| +|推理模型|inference.pdmodel、inference.pdiparams|用于预测引擎推理,[详情](./inference.md)| |训练模型、预训练模型|\*.pdparams、\*.pdopt、\*.states |训练过程中保存的模型的参数、优化器状态和训练中间信息,多用于模型指标评估和恢复训练| -|slim模型|\*.nb|用于lite部署| +|slim模型|\*.nb|经过飞桨模型压缩工具PaddleSlim压缩后的模型,适用于移动端/IoT端等端侧部署场景(需使用飞桨Paddle Lite部署)。| + + +各个模型的关系如下面的示意图所示。 + +![](../imgs/model_prod_flow_ch.png) diff --git a/doc/doc_en/models_list_en.md b/doc/doc_en/models_list_en.md index 33033f83..577f2aa5 100644 --- a/doc/doc_en/models_list_en.md +++ b/doc/doc_en/models_list_en.md @@ -12,9 +12,13 @@ The downloadable models provided by PaddleOCR include `inference model`, `traine |model type|model format|description| |--- | --- | --- | -|inference model|inference.pdmodel、inference.pdiparams|Used for reasoning based on Python prediction engine,[detail](./inference_en.md)| +|inference model|inference.pdmodel、inference.pdiparams|Used for inference based on Paddle inference engine,[detail](./inference_en.md)| |trained model, pre-trained model|\*.pdparams、\*.pdopt、\*.states |The checkpoints model saved in the training process, which stores the parameters of the model, mostly used for model evaluation and continuous training.| -|slim model|\*.nb|Generally used for Lite deployment| +|slim model|\*.nb| Model compressed by PaddleSim (a model compression tool using PaddlePaddle), which is suitable for mobile-side deployment scenarios (Paddle-Lite is needed for slim model deployment). | + +Relationship of the above models is as follows. + +![](../imgs_en/model_prod_flow_en.png) ### 1. Text Detection Model @@ -80,7 +84,7 @@ If you want to train your own model, you can prepare the training set file, veri cd {your/path/}PaddleOCR/configs/rec/multi_language/ # The -l or --language parameter is required # --train modify train_list path -# --val modify eval_list path +# --val modify eval_list path # --data_dir modify data dir # -o modify default parameters # --dict Change the dictionary path. The example uses the default dictionary path, so that this parameter can be empty. 
From 4316b15798c1e837f6d15cd5fd22ff0296247be3 Mon Sep 17 00:00:00 2001
From: littletomatodonkey
Date: Tue, 2 Feb 2021 14:29:15 +0000
Subject: [PATCH 37/77] add png

---
 doc/imgs/model_prod_flow_ch.png    | Bin 0 -> 66158 bytes
 doc/imgs_en/model_prod_flow_en.png | Bin 0 -> 64484 bytes
 2 files changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 doc/imgs/model_prod_flow_ch.png
 create mode 100644 doc/imgs_en/model_prod_flow_en.png

diff --git a/doc/imgs/model_prod_flow_ch.png b/doc/imgs/model_prod_flow_ch.png
new file mode 100644
index 0000000000000000000000000000000000000000..4906b2716e2b3cf278416e1766b8a63329362693
GIT binary patch
literal 66158
[base85-encoded binary payload of doc/imgs/model_prod_flow_ch.png (the model production flow diagram referenced in the docs above) omitted]
zBFmpfK-!urK-aIRAK1qD7`8s7=Pz;SE&h=vaT&5veonl0dQ;hALL{}x$-zq9oYF4% zx_hbVOS+T-_1|Z4YnaY!e}G9st{+wVxvh_Wg*p0ccA0u)xm_S48QmuUd-gyh!;nIKcOXp^FvrR<)pI)A^rbD7wH zIH~oPHOG6ETW%<++c3>x*Li387}=$iQ4y6;R*`ud4OP!+(_zghU&6qk%!|UihndM? z@@CfUGI|#^rx#9q{ica)9nF5QlikmMZ0Kjs1B$>d8IbFR!lx zOqgp);05@=*&Y-X_E{n*0)8>0YO{jj2-AM}19E~3tk1G0rp8mNiXqp2TOPg1u!Nx4 ztSCjmQgQcFaZjkO)1Ni~0jer!+aeD$w~~)R#zqo-Tz$1)2PUI-r11C${RMTg!ne}( z)a>WQW}ISFvzn3=Eh)ci{5Wbr_Gbt8z|6+Tl6A%q5usL~A~@^RYW90x$Sf+tk#5xk zwmFQzLg*G)QT{d|eX;-4dt3c3c(&2Bi@u@6)zfUUpuYLS^1BF9~Iuf2XhUcJ8e5kO?=kMWB`=1qxJ!G?^ozl#HtxvVif>8! z>Wz3Ky_Y}#WrHBWgp>92`9CHY=Rz8VowndNQ>Khk1sr{C$&3c$*n=VnOEo}aHZPVE zZ+U2kR$UV*1;;k3T;q*q{P=yc7h7q=Q?y|!pD0SuIKcw_pW(e1ub-ZE$Bd0DKh6e%@D5%k$4Bv~E`-~Mwx$gbbq$_RY!*EpIB>ZK6=inAa8E$~^gp!_#U-+#Auonk z(D4(wz!Ri=_saSybTAj3p1lz_6Vs+GPAqgmxl1FObJ<9!1z+L225i#vtlYu zS+Q&y^7-HC)j7t)5Jc)XKGPiE0hFSt#K?;_;q(S>r1eP3A6nCx#SCZ{8}}HmuX6=|H*H5SKFW++am*6}|uw3X1pO03{a=e7rASn?`*uNhB2b z%rxM5I>PP>j|d$P0F+jWx;cb`c%Ke8Lo|E#E2_&lvQ z_6qfcYKTWWJBOPk#-xmQ;6Np8Q1&G%AFZz9VL(EFv&(G|??>0;#gNH`aJvR7^};!W zRac4PW7k?nv9_>MQAftj>OnPv0Ro>YlhGdl5KISV4g4(AZQBrd&&3fS!Xf@d71!hD zJTEvHDaNVgWtIN-Rl+x!=SAj7K{0VYPdZ&hFh{BFd%H&*)BR5{v!Fjpb8KGJTQj<5 zLElUIpNPCzg@%RX+)MCcR0r6Q$>PGdzF85j7na53V?MU=Bz$<^O$ zy0*p|EG_8sRz1nx-;JZ504J2$E)6ss0zn5w#KxX>i2ARUJSLx2et+zk@u|r(d60Si z1{yG*zRM+X1OTl}a8HBATR)Oa@Heo&pF-xRSi8e)GnttV??Cq#__pN6s~0&J24`{V z6E)jB1=|EctYHwq5ml=}Vv?qblZ3}AHS7M=A!hll+W|X&!q9-39DSOw}blH|SauU@4=U1G|(lC$C|R*c3NjH`Ex zxN&97IVNDk>Q@JNOFJoPZ{M%>zjL^9WwKl{e?W}E!n0Zq%_+mr*pSe5t0k-{HP0v* ziKAq0nQVEj!jvAqFX$|-d*=$3A?O=j`Zo2T;qYRziV1Hp=RlH=D^zp7$pe?`JBhrP zMMW5+cJRHq-%KrqKjnb~qf8`T4Y|{A8RPKrLKz{jucN0mIP-k*WT3vH@Ft%fN?^$W zK{_S<^DL<6tuw#VXq6o503sw*%(nO1#24_ba;)1~LiIZomyE;taGN1b9~d7T}|EH~40u7~H$R`1bbB z2^eJaA({DiNdY+A-v?~p0T8nUC zTmBeH$N)=^^IDSLaaO zSNlKUC6)+C3A39&i^{cjViwJDOUzM?+}=zmFJ~xFOu6V5#s}>PV^j9f*vQTFI@{Av6dwF55tsM7!YgBqas_KtFRp{6Ech+ zJQJq(tD_o;{zg@sx?N}EI2@i@hUk^!f;+zch9&_7kkBl-UBLf5u1 zP1O1B{^nqgYr+*(6yzl{mpyOf80$dXV*bK<5VY;Bq4pWlt&f@qlsgr6%>Hu7vvC2C4W%*UK zll7w#44sm(2p4TxcX!U2JkrOHd+v!;3oW3`KrEtahfD< za;uHpv9NGnC9?3rBtS$~RAfX>rJy11xHN@bUZDlmc}E6ffN2k(d$yI?AJqR0E&|#H zlWn^2qHr1N|2mE>jz^7kBEZfd${S+NIm*#0nc=6^7yUe94(BYH$6CHK=ITe5cs20F z6g7OK+a4u483K9dldB?<&aSlD;esk#ji;TfA#^d*c-3eLi(a?1q(TO^7-vxgqdUU2Q) z?yJt#&wsr8{_FQkeCoQJuY1)WPhRe~?oZVQI1{7xO=p$`DQ9-1g?^aLR{ve4E6;X( zM!2Y-%St2O*e!BayXTAP>A1js?K#CK8%vA#%K6nfW7k;Uc z*{C<~-zaa4v?6r?;sDZj*B3cpXwu6R@Qy$RjNgBvK!HQ{`N5R++V#}574ucoZfrTQT~-teY{=+00?-ci8&7hy$grhBBewj#85W}2M==pSN>!YGf&N>U6Bj6$dsd{OZrpi-UW~3s ze?81O_WpFrbQ~gFXT6+`Qvn7kR1EeZX!Sj2paA<_JQ(XGq?_=Ri2b8~|Jt_Gl1tDo zBxW+beljD87rCWez-w==22y8P>uXXO7VSwGDQIg#l3fDITQlNqkQ)3I0r6)zg*1eG zwWA3>6o|PkeiU1kypXOx zG`uyNpeaqNzZqUNZUiC1Q-G2--B~4iq6Xsz$!hH(HDvJEi-X6WJQ{^7abn5!A^g_| z1uCA>-1#y>k8FN%bTY`w`Ynv4j9?9+c zkav)qP9U6cSH?4L8GT*m!@k|`fMVFwyG#~pA=-B>wYb^CUp}6atPxcBE#I5%niy?A zxCE+NHM~QV=Z}2g*lH42hI-*Q+VVv7^iR|o;&t&kALECodwkvRq3y-RzBnD#ypZ_N z=TysvI#;axT9-CtkU(};O#C;bN?OqK{&|`q;sOzI^bN+^pROdt-$5!SNv&DR*&=kC zWAV|Yiy=mq3wbJK>aksvO@`Xu- zi&Czj!7*9TD}pLti3Of2RjNp%VW8NCj4G~=ZA%+8!q&(t50Q3HdAPxMh%HVsbKa9V z^MlW=f5-B-A@dWOm&<3NJ)7~hArqX(o9uLXU6dZ{_I=!~6MSaA@8r;itgCfF>L&6P z%jp{#~SSv#8mB|0Z!(r(@}q zQz0}wgQsU=Vv>n1H|<_6t)BxA`~bfnTOUkc8zr$bxCm7Y>k4*jPg@!xUX2)YjHDUt zi(m;{Uc6@X`+w4?si=4dP&hsV?+kQf7*$(0Gr8uI62Li}eWw}d>V)-er{{Qmoj1h1 z{Q=i482lcI8)1b50|^2&y!>sMjcr}e=@1`>E=h)AJR>+4R16|9%re}R>(gfK zV2=ItfI2j1JRc-$N}Ii~@R+ajAN{DIR>2L`hxopPEr6Op!1?ZN7msU46Vgo6ol2#&!mxPT~018T^8OH*1cf+`-Sl7ep9m3=_zw(^zRi0GgS7l zddD7y%i+8x|Mjwx1l}4M&FTys;){&89<$ami^wz(4nkNp5{<{+A77ZJq!!;yQQ0vb 
zd`F;nG(w*qaqNT|xTH4dpoQBm_BCl0iaGd{_=^Aa@Q}F6PX^-A=Jq#}<5hK+v@O0a z93Lv&lL|fVnioExiMccjpF}ObCnLn_bip-9Aw-ojUR#B3l|cKrVM}YPTy~rN?c5l~ z+25w@KLivMzc%k7;_u|<_RV#~AAL$p^cD|o6MURAlD#QnAroiY`Xq{MGpEv4ON8p7 zulYCFA4*#^zdIaKN41H-k-eHyqds!C_&i&$&P5yvE~e8&oE`nYwu=(4rHQ?f%3i4g$B<@c>`19zktxkoux9xLCYbzyQ>eV2K79lfVvGsN0^Saal4t;w6R`BqoS$O_P2ecIVqJe7-J{y5N7_%H|&OC5`m|~l=No@oBgnkC~ zQ8<6LrTt;MHJs#bPcum6k9g|u9~5M-z;54~B3?3Iz2(wzn?YsfclC#U5f7e>$~&N2 zoGH-ePp3N6VgPpb+Ty~l9QC8)>RaV*K9ut#x;s30NlfoeAM5~c9<0N&i)fy{7_E8y zRj7z+GTx}T|M5(F@T`*#!+^L8MQTtwMO+{6R-vS`^Ejg)6l-){&Nyqn(%6dZQ&%n1 zsT6M{`8`x(imd9?geFz$AU~(zL##VARqB_qOp71rFcc6W%FVwqE2?WeCx_qR9Q2wg zN`EYMfnXv+nHl7roMJ`I#WUr`shnPiX)_hu570ESys2NxRUK7F|K8l6CQ$bJvY}D% z0>Xi$%~2R8rd>ZoaWuB7ttDks0Op7`OmVaM#@i}a(6wH%>E3T_ z9x0SG^1BiL^Z>_w$rUVO%}Dn70@oxP*E~yoh7a996z5q`zC1o^A2|XO%dGKEmlE%a zT5;j;#VBSTKHmkT?EK<|Z@m}dC{;gZB|=(-4x?@QyuuG&g(}lUuH0)XWhi#s-Nv2& z=l_EVMJi}0wEkx;JG_;HB!5eH#YWCOf`rteMMcv~u&Ju`hMr0L&$UOVN<>!=T>bf8 zK@Ul#h+-yT6D<-4-RsGjO4{x_A!T4z5So*fBz-P*M++rxvR zfS@n||BrTWzd1xKh5b-Vb6K zgam7J7K@GXMenoUAp7oOb|-L9N$w8S2Y==?9)7cm^<`QySl4Fr*vM8>V0$?JEl+}) zmIc$u!@XN4CywnLCaMK^f8#=Xe(nq|*v|-A>#hgR;QaWYFi};HR9n$zwcI$e;MWbq z>w~a^lcuW6ISYi=8F7UL;>$~B!(0j2SY+gXK_JsMg|q3)Map7&QK_e7`Q^QgSeXgIJX>Q>~12ff5^OUo~W+E$ca@$)Co zl@roTng6tT;nWj9#v(_FgOlD(TbD1H=}ta7$;!8lDEn_sn}ndmi-9*tUH=gB(?r=u zxGgSMaLZb3qq_g`bYUmePJHrnBSJkhTwz4c1Lu?ch}_e?5w+__L2v-UXRfStRovD{ zKKlSupn)R0Hw$Qmyl=cMB`adQFy4G9JDM+C-dL=T?a??1aPNSxAz)UG0}IWYVB{ZQ zBICYnJ&;yEBeP}^z&H3Q_gKA%as(_QD#&;1U!|n4T)Y64Uh(w+n3uD<5Z+C|WCagt zy?UIuBY#YV$+t$1LtL1N&#PX0H4s6UL2pCw> zuPyFIej=iO7+T~cA0CWK~mIYHJ{3MOYRqz!{4h$P^P zt=3oT-F3YTriP;rX=?${k#Qgw$b)9x28$ZzZWj24`&0jxYvbMZ{i>>|uK5ELA+JLx zUGJ6@;6u*_(?aco*VfluEb#ua?14=>GQ-!zC;Zj%HcG!{eWfU$`@Q~&s9fz8yi1}Z zHyhHe_SIi(N8eQvq-CiN@7M`Nu$dlWz?Zs9u1Ymr>hF_Vs+7?a!C72mQ*kGTm^-S1 z(r`iQiPZtty3Uu!CGGzt;VP{63Dw5b-{(FcL9pP@wZ2*jJBMGF=ZJkFIh9T6RBkZj zE(_ZM@sm6->dCt<{A=RT(UFEQ654a~_Osn-tYTD41R7HKUtnHTY_;*oi@~q#)U5EK zuP^6=RH%zl^=G-GqJ1jgUAd!RBn!D58_G+6aigHdLc0f3BJkC37s9d3OmVvLirpzG zcmS=wbMg`czu6d7+5{pKlt+d{fhj5mpd)WT5OqL7%yO`&Sm70z?rEi-CW_+tXCk2Q z^-Qx%75)ae)2Oot9m6+ZZX7kLn&ta*b{mrQUv; z9P4~f>cG*@_N~)O$k=nt&8N@MGJy^g?W{x5bHo?09oSX_(g`PRGMHPJVO4l)!c7*t zh{rW;ZISRq#Ca)?VxJe}r%BvHhR;MhJN%*2(=oB*1vk_2y#v z^dr&LU2wc$C;@+mpu(5JjuD2w#-*e9Yy^8(=E_AVsD#T<>S4n-J@0$pCL>U=E!EWD znwIC51}2^8rsAe{7MqN*Wk01skY3)EL z8&)qFP};zVAXTd``t1C6Kc(fr`?5tN?Ax`OusB*SPIOApYds|}N+=KmznsaAZD*#j zHlcivUBuK(gfFjfs!ee=J_AM2IdcDt@;k_<;vlUCL=6hyMS@R*7{1N2hMbklSz$ zZ=}z+RuS-&V^MqzBfV|Jo~bpLg)K@x6t7_$cMBefUO7!Eej|5b!pp_RF z(O(3i31*&Y8xxz1|ACsrNj4Hh@AfX?pRf zR__cU*T+PJgskC@*a=M-?-=w}ft&wGwm`?eB|SG7dlH&k*{;GP?NGlJ_~c|+^;VMN z)JRkl_6{%Mw!Ai8IP~^z_U!Jv{poJ9{q2pPSKr|Q&d>#Fug?MAd8GT1$7>rqvX1y9m! 
zB4DCxW`1c`tETv$jerJn*wobb)l#|T9dFA-En7T&jKj!#2C|QX%6-fRJb}18p=4niWEL`Q* zj5byqCs!Yt`m^}sCmpOaW+TM|tRZtDn-ppH2!G=CfD&2nq9s@?Ld3ipRy%;=w<$6}#{kD|p z(*%28Z3AmcFqy>AiJ)(^X!&l4d9oh6a0Q?nb&JhXXUUu;(O~^mz_gnJfp0sce|o)q zDGpg<{CTiD=3X#6YR8s;&s%(Zwek91No1J6m|pyIv0v$WRH${6=iO)?P>;_)MiLKD zu45Nd*AE5~G9`$l`S7h_NFQ(Ku&dX|xMh%H&OyG6DkbCYZg#5KmF2r4zgy+4P$j$u z=t|(ii+&NI@Q~aS_KY3t@xzwRtNzdC{hMl~f6t>9x=FWBrkS`RfbUpT^zNV2irx!& z*it}yCArkp#OkTtyqIz;Om!&S@YKs)3%XcdwZFT=W}rS(rxzW^zpv*awDjXcboG*m zN=+dq8?ukoL(hkC_b-rmkU`{nx~2x3)>zadc!_GHuX`%-`^fH#YH_Vmvn_fj3cf8M zByEn6gQ=ps#sXF40h)^S(LhUVODIdk?2ay{d6NVN8)HgiW7dJU2hnL8B~%L*kK!{` zn7&z9e^A8vxm&lv?&TLoqi)OqVigxHFoI3>Q%7EsjSrgB@cEl=iJF!JGmrUmXhVX5 zX~%b7%i#4g7OyqhUSy|Qa8RacZVJv~TtE)J;kBNt{-QbWc|W^tSDRJcEWOf?-N0&( zE(o}(*z6Yif-HuACh{B(S42JOwIo8_YEA0looI}I`<)BAd@eW`azeNp=%z2-lNk+8 zonvB96gpwyzhjwZp6PN1i1|dHu12PeS2s?PPKF zE^ZB>iY<{yqCnndIi~8%3(Uw225F61Uxlc5mtY05 zjNFlD^f>a8b0@|`eA9c}-0Y7-FOfGiK<|Sdl&_zg0$&zmVqtvHBIa|`Se7Q29O~~* z8Rx(6l=~gz^m~M7ITz4%j+yEkC5F`=si|TVn~jbKv?eLZl7IDK{U|Z`EcOA9Gofj& zwh;C@Ly@JfgRK@l{&Rha?uw<;p0%Q;hAyPf7*hU1`8LL)ZoU4e{T3Wr+ZYNTw=ru_ zt|%XSwtjK|S4o78nxd&iA$|u-@NQbzJ-+K#RyM>1zV71iBl>RRqr zxOTcCP+r>oV6;T}uo*KDR2<(|IPp*ULS+`a{z1h+38~)O`x48Cq31ZrZfN!zuS?ss zLqGzRfTfs=sp&PoY7GFQQNiK{_vLC(m@ARm)GrZ1&+1FWQ=S-2H%zstF0E) zWAYkDi+f(Mze`R@DzqQ5+bvnv&O7<>*`-$R)fK>LH`53VKn^s7)(}6(PM>!AQ9WjL z^_0iOX0gdJ9~}KpO#Dp{cQ*iJ^^mF@f;jtJZF;}C%T9Q7qB6etub3L`T9xrxnkI?c zb^p4}_Uyo!w%kplu-X~Urr%!G+uxR_Bn8V-l4ok0kI&#nxc7CK?9!N8xIDNbNLCR4 z`J3(K`A%p+UUnp6!a8nYnYcuU98tc>CTsXgpsLwpwE!XdPa((t*hbtFdeJ3}>anLy z(NFPVhXL|VqCKsRM`9xR8cEpz#b-tQiJL&^26`SD(~Nu0AQl|wlWt8sC4iWU&TVo! z_+$n{pi1~A@aTWsUvpk*_kr#gnSy99sdAZ)AlVq6s`))`Hw7Wokj(>R68#CAiqy(J zheDrKXoHQgE@;inqfX#+)yK@0XLZAOHy8N;Xm%3a{BVD>u*c2A@b zi9-}4u|Z5em6tN+^MGfG_ZMWh>rB+D^fX0BkHrkd7Ls_>z%dr7Yk@`raET%uR4*Pc zQJkOm_!ym&XIwrB?v(VuijEVew1eB&U$t4p&shse?+%dm=1}qQoE~WNMxp}T<~l9{ zfaC-oZ>lJv*F1s(@WhgbNgdo)E5j>?HAWBV5MO*vseiUqpKxy~v!^##bbGk)I#Mfl zYC}qQA^P%*pRf6AhrsCM>WZ$KvZ%G5&`=t{{v5`XK`rXBn2l+?qEoM=iS$7}w*MVP zSsmWf6wuw*_W?{-Ed(4Ne~$Z@=VOkUplVd=@I^4=tojxn7WTSWR#_Pr04hTgAC@2P z`})?4Io3|8bXY-IGkSfz<>9qvEKu6&aWer3cgP9LAUj7>UtFq`hXVTSexOa%p^Y^^ z&3>ctIp6~rU0mW)k4Eqa(2G93o=q3}JrirP9kJHBKSoM=wgb5~NlYJ=JPz|DCo+2* z|JYT)g*T?o6v`QvRIbVYr(p0!hx<`BZHOo)Vfo;M+OyauuaEGwk65jovglL=iRKmL zA1pI zHqp=%6sP`jy|Y9no#wAN)xX<-Uxo#%x_~-%78Rc<^LNpBe=TFM0zYU=?X~8gjlY+c z4xUceahKcs7Uz6RTsyIc<^-hsS>nr36;z=E1 z4!D~cgGjTU4RoX*eRSuegAGNI`D60LUpxHEZa51P7C&|z&l)*Bd1eLKxrm0FQo(wB zju0rol~ZaLR~3J(uW{j@V?G(Df@Ed)p3_TJNhz;FO8V7I{@J+?f44a^B(rkjeHnfi zaALTYaEFlO=;fnDbQvvd@@x>~HG)s$UNH%7j_UK-87WBd&0kV#{mzU&j#QoF+h%u}VD4M|K{1@`#C#%amCvwI9gq0O72uK#PGaJk%zi2d)Dn76 zeD;u4ODN(8K@?-E0I(txA|fKbiTXV=`{nwj!T@Nlh;I;sIRqakDE z#<9ur%c^L}KANgDTxMfnL)9|v*J}9PMR<1qF@f18ZJ;Ub{nrF`0WFCP1f;&~8mGNE zF6KQtxQnXv;5OUINz$3E{%F5tjY>mg%2HF%)+q$>RM#Hu(a`X4N0?Mp_3G;C&rH#$ zCB@~*lD%Nib_1&M-Zwx0y(VMLpW2TlR;?><)48M9=&9}J<>E0Gle>a{H`@QzIRZqK zAnoH+%0Giz1}9RTiZ~(-S7lHON&dO=D>#r`ZEEDZj>?A|&rP%GLxxl52DS{-EdIx* ze^^u`x_WEncdV*X)!1@>D$!N+3j!T{-HYwb)q*WQBp-m-v%P2~;xNnvkVi!jdVIYH z_-6VNw2#!TpCInr=&@U?DoN9NwMFdf_!m%JX+Sfz)K4KDY8L;BMF(_p!?l=z2d#f= zJB$cteNL;*b8&}^32P({;4w@`IyNgxyic3$HbWBbRyv-hx4N9r3IlyKC7Iju`(g%% z5lRaqpomP zQ@LX8yGX#iu#1`UOSJZfL$#g*y5}3iy0&_2)kv^{F}0`&bojJ`(VQ}oHein;LnniN z#rOz9VYwy}{6K(WtTY2?KwK4QNV@9|{R>WGG{AZa0|*@XvgTtH0C5`|jKN3uyZ!yM z5Jz}E+wp$ErP1p_Xv_)#AE)+6^!Z*p^#jxID?l9l`wJjfHR8`s#Xd`*hPIVx)*aJ| z9(p%})t6*ox)7l3G64^5l5-521jF37*p8K*1pm%p!bDr5QJX9|G%M>ixmxSW`wclp4M!wd4snH$O3KK=W`(!6&G z%i~_%Mi~`yR?KvB_pEIPM!ACds8&;wlm?%LG&6FksQ>%wQo-A3KGM5%P^T;L7?d-C 
z6)0^Dd!dF*;By#Ij-^5RfV!S6P!lHjB`k~v29D^JbWgD{*=hBed4WbF{|WR|6MH!( zR2B5117S53cm5)g(K(XXYvY}Gdtkw1v^wIH9tKeYaLrZ_zC8PgyHTFx+w**th0yRE z>_qaxBPk#sC|~=y)3OOsdR(8bqMB5a1uqMLxjSN7%9QP-z@qeI^_-}iMOt%qM>X!O zT4*j>rX8TE9t}pwrE#mS1l{E6!V_X5gaEPnWANDF+-dpC{4G;cZf#=+3p^Z;W|~d- zHthm&*CrcN=N>XsKEhQ{Rr<>sErR_ZUH2miK($Fs9r@w}d%sSa#%c0=Hs+8n%t?zj z?BZZ~j7JiXP<@wxzv;Vfo6yQ;KBYWL@Ys+y7;I>75(=x`v2Ja%BU*4c3nY` z5&@BgbSa(E-Q6f%(%sz+k`mG-CEeW}(k0#99nxp=ezEu1<2Z);LtQKv&y4#@XG|z? z#o9Mv^0>8EYj$N`=3)jn!e(NvfzBpu9Q@vJ0@1%My;~yALhVkYFKTv?GK9J`y3Hi= z0$)O)nvk9qi|J$`%SiBodL!!Jc$YW5VdW`){d3WH-XBm<<>DUjy>kmFVW3gL+4X8g zusnDCL8T_yWmDC2q4{Z4i(%pn+yYHP3Im}b|H=tObt24EdbXu|)4-0+Pnbbon*r5c|kcn?R;O1`q?ATiNX^M0OLx>*&Cbzt&g$K!W-l z!RiXKoQXlj7(RDc^+V$ zuB!o7KSe+ny-c(lap%M>NUy>L63I|+ zbeJh0>uaP69HYn@0eYn_tALg^tdfFLtr=P=0qwfXsZbhR^15J#-`hZ2AFU0IBI-Nz z()tp)9Sg{Vyab(F$~b!6yq&O44Obg**w#Iw)<4bHT!KS-!YK?Mvm~;7C!`fwrD)v> zL202QlNd-^ZR(a&;z*wE#_MSh$1AYSywXl=DKsM;3X-03h3M*xLaqJ~eszW8u3jx7(w zK8S)Z(sVu${Uq6px(fJ+d~`I;U&=!`aH9xf+YYAyvHmol>XSD4D%9aDhS9fQK<`t5dZJr~yC?7ZD_WNJDjdyBk zoYQ^>wKhrET_rN7tn7z4XV%N!pa)mT^gE>tLSDXCK>@qHI^L2BD&;!Utni&6rVfM% z-tB-&^yoD0=4;=3$%1`-O?Q z=t@_QHi@@@Y9fWt1nJUIS#^0e7S0O1q@;o9D};DD)ktQ>M1)!8B&|rHQ3av}p|&L{ zFA-QXSgB3QZC)IwS312qW)iywwjlXOXjmciIjn5TA|oQgQ)}c;5TQpfrQ`|RAbzP- zlI{n=bxOUe&~Y12^tHdsesq%}f&f^yfVK{F^i5DIZGu#FFVf)!pvBBOaN)YT zu#U5uGK#PC^u6@7O-p~Z&v=@<#9on|<%5@%=ekSZ4>2V&zQ%17=kB$-&Q&Y0LP3MC zsxNO|O<*_DYS5=RoNWwpZJCSdAbHFEeP(|?%-vtqd-|<;m0xu>HN4O0(XJy*Db|U` zOK4^BevubKhrpJ@4nMOJgdFd>_T5QU76n`*PF_Kq;_o4UDW30Iu)N8h<2ujRN!~qU zmi$J!uh7zp_@$Xkn7BS-;Il^9Bud;(K~(wQiq}&xK=o~Odp1CK(~z^HK1jo-O`~0t z^2?Jw7WM6$O`?;&S17g;&?r7X?gtlcxbtda$(lM<8G}YEkPA^gyAGAL!?#Wh8)X8<^i(ttu&*I-14^2mfik2yK)aZ~rwGi`)CB|!U33N7uti<(4) zn+9KPbMTkp7I<~KXdE29Uo@@Nn_92_WRJJZH`;ouku5d{({#4^!aYr9uvS!a{b}0Yn(3}t3PUnJ7wD3&N^~j zuXgn*h;N31<6OGx$vKO`gdT3P-*Ish*Md)A#P)wLWc=1Es_Kk(|+gTfpf z+Pwy8TE!1k%q2;oXTti~CkI6DFz>#}Xf{aZ(k*5!vuf&>%;;8onxpSdKZGzbE+FQ!qSTTcCedo63%5G zwEA-m9_eCI^_kTsg9aJlIMNC9KB#WBU@rXv)Va^M_Qg6IKajR%lw@jS6Ku)3S!IG( zHxpUpm*QBCrKkb^WhAOx2HOXE4Y4hRfi}rwUYpJUwSqiwWW0cRT)ZlMF7uzu#sZLHJ1p`KJ#ZjeU6+4m_6YT6q{G7+&%G`d;Ge^pB7V9%ZuK730_Vqf zBH@@So!N1Dpo1=wqkNmlF21xuK%g2`g*B18YGAok9a$xO7BF5#gh64~BgA%{+GOy@ zvLSMk>6bElfhKK`A`dNP{9wrC<@N__B*@sZRHcql*Xoy$(E^fxUzM$59vK9#rhHW~M)d zsD?R6w2{-r22jtU>f5z=k}u?|s;Twv+EG>FqY7Bhi;fr78Su9VjeiIU%>bpAZS z+3cROb8x7ct%IFQZB9FwZ=zS5Ezc7J+K~9CfK6QlDI}eKJ5WQJ!B`Vc?hia0Oxf>! 
zkby&_Le+9LXV!4b?R?@5{ECufvT4~sF2F}s3Cz1{P0kG6Jv|CQtDg@L)NIkb?yj!U z)cIN$?ye*Z=;>agkrcG}cS(R`7_pq4DKe<&0^w>q;Np1!M9rwTgHa@KM6}#2g}bqs z4(kb8=dDj^ZwPpb>TCT9H&rVQzJ#<2fKaGJ-JswZaoA`qOfU!J#}zZv;UUrBMor*e8Zt6e_EeGDj)e*PEUw*SR0^ zG;<%mAP_spu4hN6OOGnFd*U)c#du~t9ktKJ z({b1stRzEzZ~48KjXxAN%S2LUe2aUUd~52ZOcP`nN1{26R%$aL_7ILPK2y)gYW|~) zORwXj8>-PPUQ_^$+-6^N#4qGxp)Qu1h-ABjtJFI2?UIGuzHhI;6x+YQG*M0n*&dOK zxr@&=5;ZsEq&iTaoZF2UJfe$M9rWZVgWfdCKJ%M-GTsWAsJh!h=I}gX@p=EfpP;`e z(oP|l^akGlB5)BBO-ws{#s#(zATw>@dl3rB5()p5Q~0d2r!!9+PcZGuh5tdPtwmxo z#z1X~#+s%5j4%r~@|N@KyveK++n;}pMt_vt0psL?bRvy7U=R3{ zI{NRQZEu%*ieTb;M`mKR+#pTYazXrXyI-mX7Dq(RCxiY`FtSRsV?j|$rt^6003kzU zr89Qpu?_t0SQZ>vSU2-9VHDpME|_QW-3}N$JxG0KhEh3Mr%_h%^`Y4P=}~vifg1lE z21d8I+Roj$B;Dz0=DjkIMggTsOku;HbXB(BAplZJ1z8e32vrrYr{M!Bwv75ll9;Ig z-tm3p=|aq=YW@AQ2JiG~*mYTQ^1An~dac7eT9)u7GjRg38HJe)Z4!CiH zzw$2we}nM2gmZrOHL9GQ=CMxUbVY|c3PvpEH~5)kUIfk5Ma)fwJ*+?yu+4DkpsW1+ z=4v>cE)+s)a=Z|l%lxEPq$G*?`lD`KQoOEiHJ95-uy8t)iGuw3TvQmJl(Yxw!Nx{` zR;v48X0=X^b<2Qr+s2~(g*ox=o%bPO1iO8>>-Sa>?KdS-n;`}{%ITC6ovl(S#{1Rz z@tD`60zfSFBkq-BSmwYq!DuAi)!RN}BC_|Z!FtCwHB@dYA4TSVjB3^OU!*nkB95p#t^H+VIckyM{J!v$2v}?0lVlPJpeO@oNK=sTy_0r5i@=D) zdhZ2t4ZU7q+Zw;PH4xlFsKY=^dmLR{3(tuIF!CAne=W22?Uq!bdq*zkO^~??j`6XJ z-3b$bmd$m9*KHTu{oGwIwkdD~D@usZf08e~9W|u$Kn~|UtMgRgAKTBHYrv6R^7==t zOF_LmN!+kTlPDF7*3TFg{I3Fx0dn&VF7mNp?pw$kZ{g=@upwh$>bvKaIsH`gr1N@H zQa@MMg%QxriK=Yjs2oGL94@Q0sZGxDHF)1L?x?$hNzg`yjLF||FMyYjEZ*P9Ig?kV zkX3PoKz~dN61s^EP(sV#@qd$#C5Ei}_s5(;r?W~V=QaLW8KF8#&*+p#a2iC&#o-Mt z3^>kuX+8%Sz2QE5c6q6_xU;vPeR;!ZeYz&xGKK;v2T#+w{{Zl@?f~F0kE&O zB4vZblQB^ykR+7`JFaXF zY2*WN3n}`kY97s1hhAtPb+}%EQqEtziQOM{lZfntRPk;(@5NJK#v&0w4)MrHPBx|oiD%1qq03Tq(%zJZROdQ`O9UsYQae=yUb8Uw!~N| z@OO4EujC~Plx(y8Oa%F@DU%25HfM+7Cv%x1fOu)gSJG@Bnz`5NrA6G(HaWE=BR9pf zozaunzg^;5xD52I?!(k}j~WpiBlRSEe;P(|JI(3Vi4(pv~q4jsW`8Yv}p0$UEB6xzb_VAVQBDTtYr#fu*V|MV;-&M85!j(hQWrD=ju!m=B|7cRMG zW$0yv?WTn3Tor|E=lZK6%R`#C?c=CQwUMBz&rUZ&d*(g}HZ~g6Hv8HF0~(7{aExr( zn*YyI?m-Vz&zg3rzQIIBgt&TCmGgjrfH8jrbZ0k0=+!t(i03oJZ@_gpLY=}F-EM(f zgs;PnL?x~I)dF!%4W2rTfrls!*q*YS0=vz?I%MZZ*f)jmI|K4&1%RxvFR7Gut!aXM zS@w3iIiG1v73DcZ2&+y&==C;mpO!Tb1q2~Mu*O3z!_c|LtXQ1EGad%!V(cKi|V zU$5cbsF=S-M&8UOqmSwS<1{hxxUy9TBn}z@Cnwh6;tGNtWWFISmer{x*XKf%_GT`; zU4b^2XL*6}BFHI~EX~>XJBkb80SO67w}mAWDjHh*2(Y}C&%N^zm{EaZohpj}3jdL6 z*Y-Hn7CGmd3#MPPSVzdCOLsmubMdYCdT|<4 zlEb=h^h?wFYE$<))YIP`-nv?Dij}6!PE4$QHw`Aj;^=H#AJD5&jlI+#rnN3NmD}!6 zY}zCpbnYEZn{B1i$MZ;hQ*~3o%Y&Y}YB3@S$I`m$hONc1f(D(*TKhN2MZMkd13b%D zotRb31Lx!+1Jn{hh&l9qh(!{TxDmre^l@Tp&o%jl?=;fjKzS5O@|)E^u)fE}gJY=J zT}tN7E?@BQ)DjuH<}u2VpLsI@?@KL^)8FL>YKw~v{T)$<@=p!2Kh2K^l8=h>%wZS zci7h~c9*Wyn+B`=A@qfFj)MZVw!h6UHj^=S=9h;@s{(q3(3Omo1v9$Di6m^d`uMb) zEf1*`rZvvLQ7uWxPM{vI}xcyh#dR6i=nZ2v2f}yehAl zw(3^M=#zpOScNd#edc80eZ|9DBK6_4_=h4UzuyaLTv%~4+1-ZAJ=0pg2Je+0G;lWP zNCJTLS(%D1qwRuzzp&s+EEt5mc}fSHVKxuZD0(LU)Sub~tf(g>@sav?iOOER(wPTj za#{exN@VxM<6s9^w1mbw+~$&_0@^A}S4WHLm2U$-21$*(a=gJppRXdk@XjJMDHmfi zSrghDfcGN$mKZvd_~KQPE8hd8ba%%L$M}? z+YIJK@T6XJSK3deo7}!_M?=Zn{z8xx**0 zPX~wBQa2drEJ=S&<*+a|=*NhQ7zua8Z5iH{o%w>PNcf|G7a}skI@G&uW}qTg3l-y} zYJqF#RI-170JFBR_v##;d@C>F0yIuBBO5M3>%_YKuyC)!knPxZL9*I0-r@|r8obx^ zx%vS^3Z3Ss-f_O(U*|a|A)S4nZU#JY6aE7f+1iDXCB%^8A$q#%~dfP$Ru;J=|ty2fE7P<{=iC zx9LWm-KV-D{hrs20dhN&AI2PTHC`iWOdU%tu+{5KE7A~L8|=!0atP!rNWvV-EowEiv{&an3JhrmU|A8p)R$JKiIK0GK9Iiu-WME^s4I! 
zju8d5$e2hoY0C)d9lp$wLED!(I{1ygH2e7GV&V#6>ILZew^f;3Ykv?yh^H(J%rq#s zm$a@#q!NfMa}AR6)UlC@8%(5WeTan9HSSv>IFrCpRXRvC#3_*tP;P@kf<_IOJKtw2P(H-kF5%##ztVUx~ekbgkD>=;`s}kz#;k7$0zW~l0?S*%S|!!i`fc+#lid-t|+w%u!S3rAcY#&_;xp^E2~on?pGL{ysnH+9klP!%}aA{-FrO}(H9rd z|8A&6S!fd$4cbI-oZ}CR1@^YVDEvYm;uy)VE^d--fG{y$$D-61&fMRe+er$cR@A^P zWcWCD5>c}V-&r?O!}v#Ll_!m*W3dq}k@ZQ!LmCbcAOt|8+K)N35g^Lgqf;u&u6>05 zA(MM|KKFYpMn3LKtXrgMcVg1AW=DPK1uZPm5#>n+L942O09A7#Z>q1VU+evxtIit9 zd+i_wnl7yJy%Uf6YhQMLIt^n*)#Zh>^12c8qRh8C)P09(NAkHdr4H0%P=u&_Bl%@Z zADWa~F$7OK;?TwMaVb=U`qg0_yQuN8{GA9oj+1r{yI0C)>6j!9qHU<8RqN+=j#}3W zPaf0XTp56yj|`Mq6rG_a4^_2ute0jSH*D1#{S8)wVVHci4i7XTNjTsM=Qy_vPu}`T zo1Ad@SQE^Y!Rl5bA`uY6XbJkWB19QYVJ+$LwfLeQ{^p9=5#MJiX7=FIeS^ayMMdg{ zftFa-Ye=6omCGVYb!y$wgOeq{E-G`U4RYQD!7;x8x337Uy;AH!whBo`RCOK z)U-tI#lim4hdu27WTDviYXz61rgGFq>jxOV&M?B>&c*619PO9gKY3}>^s7bQhX+q3 zg7lKEu)9lQb2X-3-^fQwNuthB5P#v93vt>-kTb+&Lgf`eGf>{ghE88#=7mlDBh>x3 zix^N0AK+jXkh>Y|euH+FTI6mW^_kRBOk+X1Ns&3GxO(HOL86*P<7q;gTKZ7rI)2CL z80l-J0CK?wCi}f{O~`F$0$jZ6fQ+@-TyMhuDz*W7ahbnv$r{~yjrsaV%Wmqq3T~?f zy_>Oc>DW7&h0N5TgI?fVzf2s6tG6$~O>@~S|0Af0Bifcqu<2SnpdYp5v5_O5Eao^z z<>yX~jo|Wwk151VdBk_n50=_-5{Df%SrI~_hrTJjI|7sPRh%l@`<=G(20Ldm!y6-y zI$HwTryKBwKA)#Imap>NfTms7%V#DpBelu_G-%`tTw#3xdOI85Dd~Ak2$Ye6i@8W9u zZkNkf*Djstln~Vtj-zhS?ymKD1jE7QUIEvCIn!qX@;4fkSHRi{ z(?xe0Ola6AVS#-8)gmo7CW8>QxkraXVM7d~BAhyP9_#)!NF`e+#=e+Fw-;yV=9U=! zvPVLj*fEFeguJ{b-EE)&y58_XNVWrF9OViG8pJNGUeand2&p8T)%89dleXdf~EdO zWs5C<WWXB?Z4+lW#lXq+rjY2Uk&s)*<`xhw>ap>x zYW4fb-39#Uw1X!i>mk@J&W0sB>jM=b*_ZDuUWEd-1+r@sk@9tsTI)gjtahrs}#`Rmy9yHnw@^Y z8e2{kZsEff1>MrstNoBl0LqV^p`o*lcBL(iGV;)-X?Z<36sQ(Lh#+S`+iU`m$~T%{ z+&=LD`D$=U=FvAa5@R4GRlC=UdQRE}I>k?Juqu5YdE0niZ048B59LQI>y}Le$)n>3zdlJGa^Qm*aHRveonn|y;|FE(y`+(oR0(*35IQyn4zgY z!W<;gWDBRniBT+Hu-4)8{`&QlD}4NKhQkUSj0SS|Sd*irX}hkDPg9pKh*yGybjFNg z?x6Q%zT}-1PJdm*XAq*FfnSVt_A->o%u#q^?oioex%_?YB5u}q>za1i!D>1Y9F%gO z2FwyQ>M!Tp_D!QZh>ymsx2|i3!W#R(AN-|LrmfWb)S5pL%lb1$Iq8PF?G>TYzl55V zcyU^>QZbweB(i=hj>CA*u_E?e9rGYV>aRuD=l!MT#^y{l9x(y5mrQa!85F&5&$fl~ z6>_T7tqo{40MAH_MQ`NhD%k$N%mFT899F8si1hA@={wUCb|BZ(g-bJXMlk4gh6VK$ z(+3`&wAOhrcai*%f9nqK>{eno%V*5&Gq?GEV1dF)D08!>ZhOe?=SXowW%xH{iS3_- zth&34CU=QOnA<(P@aaZVDT|*Enja$gd1bwnojF7lm)&9RH**A+W&%{o3`3g|rBx<5 z_DHl_+@IZ5T3sx--?Ch5uEU%$%8Svdr_VAcjQho9B;v&b9CN;9PD*Coz_v96Q@RiNrGGAYsGXF+ z%{TK1nlIsNbaRg*-pcs$N~svoKHhx57{<)}Og90WHvt(ItPcNNfg)9LL?MFqwUI1u zqCUU~p@g-oyFzP}@gXGO$QZZ0)5EoO=aojIAVM$u&V7~OC`!6r76~8kF4ASpZe&qk zcnySJ?EIluz@UB1Jl!MUrF~6y>u~MWwm?rM&`XhmF6*x}R$pg0Z(=9DO_|nw>P-5~gs$*;fQd)Uce|M2AFACu3=$&3Y z$KCaHTk`B34Z>=r8jXl<<8RZ};vgm_W2Rbufs`0;NNMaFNE7(>|*w1u%VJU(jY;7S}SaxG+%KA!xScw z5@wMY(ndeux+}NHkSS?Hu z@ad1GGE)-~6`lCW?^k8k+y(q>;w4$iJ@RQ9)0hvl6FGn(pDP9*^F-dYh{<#fLS&eA z{d~Ee=wTdh4;s$?jDq%m?E})vIuD^?0E`_63|F;V#M`nCCc%Y2cYyqxY>3y~G)ZJZ zkiEQg@M76(bVrE30!_@*#n)TOR^!iIpNn?AIkif&Bf&p-WG}BY$QqG|`a3GssEq^W z^>|ycJ&*A%(?y%jPt{I#>3WTi%W-5J5WCnLYnK^vf93%fr*~_UeF~?OOy@w~0w0w* zy*!x3ia)d%rHTXZVvozo?P>|z8ReQ~vX8>Z{0OyNk4a$=u?aeAuq-@ma%TWS5KV&X zbVsx3uh%Bzp98yih2|@)HODe^c;*k@*>PVxh2rF_-7tO*uLy3`IDi^0Hqf!H>73Hi zj6LRs8BSvt1T~3SVA}ZH>iKY=`)%)8S@W;cisBtqvm7o%?E&2e(skRTH&t3d=Zphe z;4fM>35y`}Y>Z?&o9mQgM=Ei!%KE*lp;=@EG?0^t*78$Z4%h;Vc&!yYNM&IjJ0k*M zq9%w@wu4TCArT^8INh9OA?_!HHY6`WplrMD4=JAiaYIm<+&pK>s_!&kj7us>b4P1% z_Lr*_BYxCKc%FLx{M*`)&t}m7&h@zYy2>o-MIfl4CG6sMTMH%=V)-7tM`3CFx+Lq_ znEh!@;TRlkYEL!6KzKR^_%xR*(A!MmcGZQv4sdkRSpmI&oC%e=G53%CChDnO)s3#{ zI<3qBZNORa6+ZWk){7VHPvS!SUj_lRJyy|uN4tO6-)JJ?HQs|SI@9O1Y^#P7CL>VV z3<@qMe#ekGNT4_0pAG!@C<*Z_RVmkPF!*A)Lb7sLFCzot=*Zg`)|p*#yedfI%s~>D z)^m%`pr)?s>o1N6^omhx$NM}xN2^vn86SCA-?va+F1wM7Xv+Bt-S!6?)@XOv$GLzG 
zWIqsWSl6h|dIRp!Pxj!9ypO zIr$O*%AHu=b{NPTY!i5)P5qyY*;Qg;kw^UfE6U_L z1?8K}eSk~*d9wL$TrTb6CQ6;0WoRu1L_~DBnoZ7QS2d2@&khX*tN-KBSpK03^fj)4asAsPH5>9S ze4)|khEoW;WEFSU|5MtP5k$rsib|a?44XLwJPD#~er$N`wjZ>~lmC@8>!*-gqh)z& zk4uQfpXPfYO#MlN^KIAgVueAdC7KGG-${4Jl38i9p&}_z?}O4Y;Ezy&F9QCE-$=f7 z!OPAdfO@>Y7*OZA$IG=>goYcMQ`kBPPM2|*-a5bryRWoF?fHG}<_DGXFk&Shr{gFf zjdl}_(#9H`HW!()c(IZ>osNuMXGry>xNp?qZ07SnlR;iG+Ms{i*}tzp|8Q#p%q4wF z+k`@G$#$tt?BPLpRFU%vR|^>XBmRW`lX|hsI|{xM@8EOLF^@)Pj^E&z(nW3`EV=i- z_Q^uT5+RgpLi#%(*?Z-mu%B=YL@rD8mLfp2br$HB;9+5jgW%js z^(oC#;u_>VkLCF|geo&DUPkROr}e5={+$@*kAGGqhV+#jk57{qKB05QzH$i3j1H(& zkcY@ZPN5Ib0!0YxY&hc#i^~pn4i-!3o0sZgVhu`M?E(x|4qMK?FeEW&QpW0QPaThN zh}9nFNJT?bJg+9?d31h5lHz!9R3ZW$X@raSw{pgXD-;gT*mrZLgm;o_3*`oq>Ww?@ zNz9`L9|ioQjI#RwzVR7)GXE7vqZWOjyyVnenD)QiLa6XCcI0o~;~whTIf^lCd3595 zT(>!B9VUvnV9iGN%b{aPY|D@HL9}LFpxvakW z_B?O$Q~7Cb1MzyPBh6bvd(Wo_C!60Bu1GBZzqH!2G8n2^Ty|UISrvPMFc}pC-Or@( z72qKlR}q<;>pwVdsqBwlEK*wpNtoHZj5Y$)mHD4@6?(#gKXik?RVPgI?Q>l;>@M86 zQcVMf-kCnG@yn?U-^otZ5w$ND0gMo&|M7Dm=&L~c?EpWA%-c^`0p&a7Zaf{F@T@*% zx6Yu)G|%(lcCuDm8+6S^PhA1M8->+Ullz!_#MssWctFL(>I$6zyU#2sr((?e_;($q zFj1r4)`l3ni+xG_AD_ww2pNI5d45D&Z_c-ZOJ=tM-29PITz$7cz<3o_#0K~E8op-V zIsoS|g-q`zbCC%enOa0@kS&M`{LqyJ9`^%SB>2{9Cc;TBbz#rs2tHlxa!kZnt<@}R= zAK&@RwFU6cg?+Khf4!wp(VK8QgR5+n7B0e|46-~~tq61&%vsNW<2CLp!dU1JCWxC! z4B`m)0N=21bb6|bF$3Ta6oX`F-A@No@>Xi+L$am4AK5B5?mc6>WnIj!Hrc_x&g7)8 zm^9f+U7e4`hg?2>9fL*-Uo$K_!Og zn_i(T#h0%`_h$)s7t>dT2Y!?o+^Vc7&5PAAL(YYtqz{w>wJg-XRRjf&{v5d;3keVQ zT>4C_+u|X=u;>A67LUSvA9;Iu-0O6Bt7LY3N&58SN{g<*;8_5D@`v-F(6cG%hp8&j z3IU9Q0(k`2Dwc`Vu`QFIMbE(CR(He)YBTs&k16TePCHM|q4oK#e^Tgw@8?k+YCWgW zUz|zD@9gZVtF?fH{3f3lVloF;Pe2S(wN>t|P^`u*L$BilM;nOND^cSpq*FH?0E^q# z2nx8)nIPZwuYVDauk^p$z|((hy5Yh%0YgJ^(d*TlS4t>l?*ejkE)+wjNwk6=rdvGR zWV-h1BRVrDK+3QQ*iGI=)28NiXc7pfqmhUhyNETN4|8gext)HJ?@4_e>_Xt+`-=BE zbc}dVFtT_h@mbCFz4t1u&JkqW(tcy6W)iRl5zSB;h}`@oBErHpYse$-EG=-DO@+!n zR9D-^&EElX86h;h1LG6qmhW#T@%xeYLQUU}*UqpkC0O3xVd>2W^1yMS`xLnRgS383tt}|6ovP#c7 z&b^MexEVnwxToykhWheROX!m=UOKx~^Cd`q(S^-s{`D3r)SGpiL*9HsP1b>iPZ$bBhb^mTn=rQOek!I=Z?97BF&kdb9uhXfzygK?==vwiXt&>PnGhET>* zq3gDDzIAF2`8fnki&ZzoEc(I7UkjCrGl6N*bP@Y>R5y+lyZCb)yuT6YK03KcZy}*i zX3qDE1R&6utJ}s_q)GL&1+2_^_~4mwn1-;+&b@8i}Qu~z{a)i#mgv*>#(z! 
zZGvC0Ug=p47`6(J3s~e_W*iM~&%LjJ?W0hI%eT(oSwNePL;PE)n~)LI&hUeC@p7_L z^ih+o5c^=JL`)S0!#QidShqXumI zVHJZS;ZHPx_igvhax$%TM+4{+Q9oGS8N`2KbQ`uLT>^I;4QO$UHJ~6lV}U~^SZ*_#?0YP; z6HM=Q;>Z9ji~VslIPGc>e32DR7#yl{qSt!r{VW&lKYvHTmlB^O`JE#8D5DxJ$(2RD z!D%V8c{2_i-7WR#$vi$8sDoqyImIWV<5{98HuELZ_%4tB9j;n#W%4Ml@Ca2#q!)xHf&}i zvmVNKF$3v3jRV&A)tAvJ7O}Jmy}B)4{;Gsj2ygxsts5BC_A!q88l(MZE6D#_DRi1q ze#j#$KoyICe%Uq;KCMG>aweC~bqe{fIoCVz3y?=FtnQj@^2WnAUY;{f z+bc?J{tS%mq=>$p@HlY9p5JFUV-XJ>b!Lo9UBmw}_a`*FF$`6+0i5LPsK*l-c8M9*e_?o#9tbbNSqWcFhrPdw@nzpo>d-b${SQ&*GT<#Q{wR zY0tA1g~5azbgnsyIGaMDKI|)B?k4wb?iYqF+Q8Sp{)HA zvRI}F+y;l@uoe5~L7^%NC*5$Suvq9aM}io|ZN zCFli@^@14qB!lnUUQw7wcapX|KHN?4Ze+yI3}i_~=07Wzhjhp-4{K}GN7 zOe|Hfd)&#KpR~ubaP7~_)5PEFi2hX|>136H9FJVxnB&&V?HYelx|#Q+BxjfaX!5I1PjHA*tN%vTXkRBo^TDd3YheK;j6|ZVBMX;Qyjp{y; zl%~HI6#KpZWU+VI4jGa6bc*t(2XQds=8j?f?(eIu-sPHe&m|g^<{AaZ;WaxopWx-4mLC0Ab%Vd%8aiI8JA5#2bnCk-@zdlTX=di(8&vzs~h4?3j$dJ63{o?KYr(aW$WOjJTrf~BjqDz zusiyrf#X|`_)C-V@d@I%V&cgn;w!~z`8vf|xldJJ9GdvF?ADl(vX|P;C(e^p4bQ(G ziNlY-A{nfaM^|aGt-WusOvjxssNr&ubgFmSL)?G)t8+qTx?d({0InuZve@b%R?Q`s zz}X?^{^?z{d?5~8L`z#qYU4)rv~~<|o@jD79Z_@0%7N6TmmD@OF7@I!W!9?#jLQnm zu9s5S@X|3E%eVyvMHO52x3;(7li=Q~En*7y_gpsxRXwDdo!wLSl8%YoZZ#S<-58nZ zg=Q4Av@GXVv!&aUC7Opf2Nr%Flau{Vce_vhA+c10_u^{G(ij;O0=_q-aW#??GRC*4 zG$4we-sIE8{AZsZeAm-1zuhGr9i>gI^Dj1a^~Em{ ziE{3ERqnOmAyxU_z+(x@KB{$vcXIC9i9@kv0rH|2?_{BQgv#>z4G)SV<(oy`C%)kN z?F*;nUAO!GG&oKSiL%QS1K8Y`G#sWm4N+7+_@Rda=g3@QvKJk2JB0uz_lTFEXV?@Eq7uxc%6iDq}+N2V{HA^3gDgRT8@0L`x3$~$&J04x%(6(^oI8py(RAlllex3X-<9{2Y?B z0XA|R56qW|gg|xPVrJz!lO>~DvWx$V>!Hd;+HS{;0}lCYFZbo2gDqQN&d6e?eWCIEd&Rx__NSn%$6T$CW1z zrmYQrGoD3|0?=nS!cAsc)T@Mq9I2?}yXbYneGNoqf?O@Td`pm(Q(9%V z+#=vsWnrXhCq8pd?>ucYyuf~TO;vhfzMNyG%*Y#X&7w#nddH0$qLb(GQOTp8QdUb4 zY#0C9Gib}qrfFr-59s1umDrryi!UANDHQ+p$9(o5wCS^M2nApG{Dr5FX5l$X`SZQ4 zWI>?baZQ)~iEe14X(CxHR77>F*KLBgsGjDRSF3QE%Q5S3Dz*z6ons3X$M0?6TsHv7 z>#vTa+}7+3If;~bEmv_6{ZD$nHH?9$xueb`=5+Z-dB9~ml?;HBXHrciF+s=EQe=WZqi_CmP**w1q?-b=e~g2wsJwDlWrM=0#0uX2lDRLVe>`>;K3L1n6| z*16W`*Gd_UQacoPVDBO=%P%#1aSvlw;l!+j^nkhz0MOH=Kh!}Lt$2}5dj*9X-I&^3 z2MG-Vq7r6hE|mPVu6Kv_{KeR>CQv`M2jD;6q|st};m z0t!Y+M2l77aeSZdtd{^zw2#GPT*&iwHy6>NdY&EB!7xUvii#Ar)+maK!6{@u;J>+S z#+1+wHqd_46=(6!H|_bCS_I=b>>=t6k7MrXTTY4Al{~NBzr%uCKd$Plmt;<}K;ZO0 z2YKXs=%qxVrg-!$hpJ2tAMB}}{Qe->o7u*7Rg|RaOZ8V*{1xefWr#keu%;^K8cb}& zlGrL)ru);wk=!oA6*}rZ8=GXZUmdS+VS7xvx1Ust`dhzP>dERc+_g&}z*WPtU!`XFv#^@15@bVH9LL!0` zqlFlPQGy^wbje4UQKJS?h9n{qElSjg-bU{|MWT0uQKPpZQAVQPgS_AR?ppW$cmKR= zS=JimdCobz@3Z%BzowP+0Yqjs?YqcXS&DpzX`N2TUozkh%2qmT>!Tgo((YQ@#XhH^ zuQBA~gb(~oQ+?ZhizFBKRUfI00T-LWM?mF1f|o-%Yx^VXIlqjQqII7;Ti?|hT&Dh8 zN=hmsK3*tpaH3{G+7&O>wDaLj29hKKI`rUJd#^;zOB3aGM!VTkk0f`!%mPa~rQ4wA zWvRCf7e*=yo`TllM;$Ig`!0Zrb*8L6esv)`11Vd%W)1Z+dp|zU?Z|C*-(Ckzj_TmT zNhVcqnwar%bveJLvtPeYdwDmnxU`?MeSoyEYey(WKYG7(5Y*%)Y5W8NOnTc#m zf+@xCjh4AtPn}-EU=ylCos;@6 zo>GX1ZpNK!@t7Usn_Dqd0Y~6K-SktVW6Lq!_?l6GKqG+2m3N#V!`Wx@y(y?88FpRL(~w``PGfLCM9iabSNzZwO7oY> zgQ;+E^W4SEGV3su@5&TZ0zRN!7N=meyzxDd2lW@F-Jl-NL?NWz(7yJk^2T4yay>qb z$WK?p+!n-Jx??EH%x|9PI3@%PTn*0SUth5ovd2}7BxH*M4;M(sM>=cq5CCOPSrb1& z{Pvn-`uC}>HL7dIi(lNr^)9?{AftYtGgT-=tsj|l-VgDRcA*)$Mur=!2FdSMT~ogM z@V{~w?-EnwTl7smlw59(nFSOc_|#HF$Mp`1aBu|bT(prk*|%IU+KgW~fTY%sC~Z_x ziNLLKvnjzM>!947*_NH5-!leunKrfeGMLMl>4Yp;C$7ZLhabf&Zz642`NRjis&&_a&%Bd1LBJ*MYF6i2mdPMz$Y-mruC8%;e6Azj3%_gB+}SCf3+e_I zJgPL}#Rc?=sO;gcieMyC$4g^wJq$ay>ypDv_P%G{xvjr{#$B%X5Jg9*S&+$>Udwf_ zdD<7hDEgQimzNG2f5vO{@0ZUj;R<^_&#WHQf1{tTa#zDJBxR=S@8=37*}L6z)il-+ z%3L!^ysh6SX;Xb>tr&Fh-XOU3>azA>yd0(-q}bNJJD%Df7@nxM0FD%)OZp$4Iku3E zi;!KqSF>{OjqGz`Z8VgW<;k#L*cZk!EA|SlwLii8(L~-{l}~j|3ieEV=-V0pLPD8d 
zW;8zj#yu~!2YR$*+DK2!c7pHp6;rLJI^#UqH3Dtfy)}?IOV-MXEMm>*YT{=_V$V%P zO>?z;HMoaW3g`n^N+A}OmU^D1+_X0A`{JlVzySfWW_lxD)aYD7=Wf}Ps$tjess{TS zqWEt1op)D1me`7dCPf36&KaRVGjsvnuWMjicNr+`6pOcJ5hg>Y|5*DI>*4qcp{iQd zZdy9Fcb1KNkGE2HNm-T&k&GcProTu0oJ*s4+x%doPSMw25Q+}yd1wUo6pH`|1hVtJ zE??b>kOLSs8g6Y)wVt4%Z=pP*v9Ym9W1yxNGshtN^9zd-Qc^nmO6lC{5x~eSz~+tF z+;q9&um((ZIv8Udh1wW2^WCZ~fAE>A%PE=u`zp_ZM@p56%xLY%jlI_Iy#V2$L~XwA zho<(CS_d!&MaIA#m`dy%_Bzao{@2#I(@ljro?Z>nuf2zB_OI|A^Kb^@l5gb23@A?F zAJ67wuUVRYFgsU;+7L2`G7LXaznEwk2&u$ z#=<8*A4*NXzA^JgV!TkliKsoV_7?yS-;|s^+q2mYX0U{@FqMIhb*TJKrPb#WLR(V{()Gn)OdMUoNCD-}Ume?yX{T~A>eshsbGV)` z{5O&ug>T0Tj?e|`bux7~eB(DwDQ-uY$xd}&nkErPyzssNcs6M^hs~uwrNOEF+P9aT zb-xGn&02ykCQJ^erKKspVB`^TlSXyXUo)*-m;h6qM^6!BvW;=vt_Wob{r05#cey)glupU6Dtr2SHYP)aAG)(v0P#m zSm-@-%C2)BGt3+}9=+z0bVR%g4%YD71RhzDMDXG?z!LnRwUdF*C-L>{y}>ifw2Vo}Qjg5;-sXb)=)B z7mVaSLjSE=tKV%`myRPv8`+1m3ZdR1aBC#2OepEtsuPk&E~uNG?hhayQ|pWt;zrLs zz02{rFZVrvTYjcw_jNe66E4S^?IKdDxQ0A{-w*!htR(ee3*n{11{@i1(0t|{wrYF^ z^8&F)xpH8+UGZ!15()kbOws931?Kbuh&VRxO+pKp3&7~O>*S(c!9Hx~p8k;kIgX?- z9!Fc%xmOX(M`p4!lh*TcE_XO2%Okr!`WwHyJeog1U>@(pK`(NTe?xtBZyNB6f70N#wmJB_Mt; zEmSt5{E^#>nOXK%w7lt5bzZ&XedbUF=E8td!bC(-d*V&~`hAj7OKWR{@BXq`L|oj` zgYMt=;qN5v2x29xQTnFgpbVk8v?4=QlgQQOR(%z?8y>&D2U>!zZ&v(r5h(c-0(h1A zg{JR~T?h1>0Ki?`Z-r2>e!2oHtHDhffSwR)Wx(!YVF3~YLV5J_EdV-Hre^si0W?95 zZWyzREo2kR)gL_gU@%(aUH}Rdk)EU#K`^F->E@(3sN(4H@Bx)Ore5qDS-U1)#Ij;A z9A*XC7jDL+UytNHlq0J(yIXC>-Z>pTZt) zbB^vQ#)zJdZiY1CAkl&MOKH$fJE{w$H6Oo9 zgO{>{>lITC@annb_kjN2Aude{93lFxrgTeMoG%E%I%poQ`OSj*Lk%XB?lJ*ukl2Mn zo*zlI{r2YXi$`ksmiNxPdnMrBH%4Oo$!BPutG%gNu;aGeQdVmF)9SctPf)k-G-%=; zg#Q3sr>{rS6;mxJM84hM|0!c`HJDe~rB*2DTgj7e z-0sI=yL660aK>VOo;8_i11x!MjW0w};Q(OisPFG=3E7?lO8vqq3Zl@@r8d^99uZ6s zG+GdU=xT{S47b!tRc<;QWJ$q_L9iFtyX$dEyV8^=@^X>Zk!L9_SKfr_<$J%8Red5t z$MRtm<-4JR7_;lfjz4$5(l}an`_7izd;SQR>Yef@)B5-bj1bE68*5lOU|TuR95bbO z0ZOIoXq`mSu--E@{hN`6tv#ABKHG|GLt-W7Dj8|aKa?D_S|zJ;XUA-Qh3g_k?}kICiBRFp5{2xZ_pNY1FOsJx@`1m%q&#m@swiM19;dN z2<%wyICLjtV-HajBh|Js`exn(P`5xv9{Yj_eKW6)l05ox!EB*gbJ@rK@eYZbRi+X* z>mGv(V?|^*3gt3Wx=#u9E}a)3jhQ0*l&596Xis9X;sXAzG10KZ*aw{2=O*_CR~@bi zBVbj~m3q7Ftr?6gPU%Q&KoV`grscq7cBWcK0=PX%e*3=QOJTF~?5}Wmx1m8H{6}gb zKLo&iGp+Q~!}F?W7-z#zP1<%f>?g{QdQY`v4cv9&ETuPR6bG)NIpw+=E<9!b=1lZ= z@7cKE1Fn@-?cwnQ<-3&)qpr`=sD@pjV*qAH!vy{^%h_e=r8>h*{jaIu^-Q__Y=WEh z-R-GhQE_>bwKs4?TIyrCV@}(+(tNcnm{6sJ^spbT)D~mKh2RG^x)P*xU*#ciAx^bj zoIhie9{;%QyS5Lv&fv3!hvg|DWiXBRDICH@I9(3D$sIg^bbH#`1yjwsC}vPqfJois zEtLV1$EJJj$VQ?=yq7OU{H^=A(jjRG7_W0>UW}JhYLcX&=bM>Mgs)vow~dd)5~14?cB$+ z^D{J+hOh+~v3Fp^qM1R|;G_R3sg&PA`SvnV$i{4lm$*o=&N^Gm&9^Vq2T5-d4Lb^# z(iaUu@g&+mQCiM6Q!}xhMx7hYVLNQy;V#`3H+T>Ed}+a$TYD+~9RAiExVzHm)C6=ltAq#s&q<^xSYHJRKfB1@|$x6rK8rVa3mc9U|z*OF`I5}BoPxfECTL^ zt78jD*M$(vqF3!5*xOUzj;?qvs}KNwSnRf$>l%l*)-nwNWH?nf*o0SZzx#H-E|Aai zuj2#F_iJMrq%-y$X_)zDxtvZ4|EA{I!Dl>j`ZDm34=7)4Y=e1N4Xv~$hOP0T#&f=W zc-~_VkBORm_4c%1ry#RqGC0-KwU_SElE=_Mxzh%)6&D#j6i9R2VbVy~n9|p;HdL$A z#_q9@f1`^$JKHvnlT%GhxMJ|Dvjacs3Fk7>zx;v>f~8%^>SaR}>gm!RrB|@TFDRhg z3LD2leFfHhixi7Pt@(#t{SS+^8ZSP)4dgHwA;0=au-W7ydxA%T%+;lp%6{zIleJB1 z`R(5Iy_|O&_8VGhrcrOB8O;1L644}+OwOf8pk4ass*1W z+0_&ve{^%v9kUv?5V|s?9Ds?b{gg1bi@zxdB$2$Jq*3cA~1u=^!%5PqxC{^0C2b6Q0h{V{g(=*!GJ9DP+kg@PkOC=xQ zX9#wPgr&jkLRW>;T{o!k39(Hm1R5zhZ)FB$`f;ZFAZ=f5S3h)>b^gO9;k6Rbcv+1g zC3#dW8V;&yTEWl-OOVLwtFD6^D7sz1F`x!&GmXT?2f^h1!^9o`eIF7XY6my1=vOAwQWXec&Kt%!*ZHR-rFDuW#0`t1L$ANvT6G=h+L!&7?eDJjg25jzxq<_}ppuO$?2}-)Bq<>wbpeo; zADnRXwiWs`%%q}#rQdOf%M+&jP4HPYC>=h2I4!F+@VEHynxZZfKthY>p951oIO0-@9f= z|Je=RuxAjH5sBzd?zmRbTyY~a*r>p;YrMb*h=ww(2wEx%M)SU_$9X6CpD-H>WhJBQ 
zl->tTnlH+Jy8}a#&TP;#1~70Jos#fa=kG;_^u!&f@q&m7=dymO9C~yp%kMFTkrdu! zbV%7@WK2{GS*{$qA*E6aG0(mKpoe!`FM$(zFZsde7uVfZhF+d~8vv4PlMucaq{zQl zef8re!mc@rR!+j>z-WHS2Fk&k=c~^SM~D7A!P3Rg5a! zqGcX$!&+3d-hSG@dBL}9$Kpm>9nG^V$@@yXZ%8~uB$)Gh5+zF-Am#BDU)H}(OJJAIDY5#?dEH<^A^X;d;lybC)K$c-br5YQWKY=tjiD) zrk5&Vs1d2j{?pf2=NddAWxMXUqJu&aXL52qMOZn6AX@4>L~eNW>OY%>F>zqMlsBW? zaGa`W^oOmRgK^tqbfmjXh{oR#f__;SW@6x0o_FjY=8A^`=IR?r$)|yeHtu77JX!&U zU5{Szib`%K$Mxi3nOi$|m!|8CVho(^*zTE7m@2`7prvxw4TZW~HoiSRx8?stJI%Bf zkJ4J+rgp_7#Kr0u{|H>zTRT!G^?oEhVhO?heYI7Xwb?&(6(HXqOKF48PGaMF}9`IsckV=J$@L5IL=POb3-XmZ(ji?^dWQd&brNFuOfFV=rt>;LLe*B78L4A8P5DRIUJ$uq=D_ zazYa>Fh#*|;#b`#$T>D<=C6B?7i824X2(XDWX4*|h+Vk9oaRpE8kFNlWfi>crhR zc0=1QtBkf69W@;}k&!M!cfHaq`fV?uIpD;OCyKBSP_6d(KSd+&DEiIce?1ba+ zG1mr9huzS`yk)W@dUWJn9=Ue-+tkO*O3rw%?%I=vU%Jd6DcK#!EA|c-O{_AnEUUZ- zpB@jd-BVC4tCzgL(R?~0DRw$?Nt+QGfl98tss%Xyx^p+iuvu{W#Z`ZC^aO+0(Uw*2 z0D)%NsP4^~Ig)3Oe7LX@%oL*ieg;XQ^ZvbK*OEMDSpD>HA#FV?pzO;SsZ&I#9O z8$SORE?y!mS9D2mew%3Fef3VO1LG+$Hxl?%`K$~51T`<9o58$ z3h!8pIkE7S>W&WY%v{dfmdq+-U}UNgL$AL4E?t3Rpy5^h$@CdSM>SXO*J`2)#zbszF3lruD^Yv?yn>QLLx^$BmCMx4 zkGbQONzb_cq-Ck;K^QR^e0v}Q1x#LA-6r53B+VCLmF1UsUD^{FIND?y>o2DRjE5WH z>2J${N1^g>&I*~w8|drYaLN;u!liwEZDbHPEWBS2-NmR5XgS_Yc zLqQ>t$cU+@f@S_CP0y?F)d5(8v2^FajO!1rwH7Bz>bjVpygJqbh;^Ww<75WoqNS0* z&zv`=#{l&i2&L&gS8Psu68L~M!DuO?^T&Y$U3ITMZaa5nC^6<4M2rKwKqr>>o^N0C zCQ4*sLa(~qfotsTW0#Xn7+(OcQm_b{o`&@im3G#9C81oxh%kIlOknHzf2Vi9=uA#5 zySGT-UATmNMg(jmh|wA&Bo&`}I5+Y^cw}T`Q}N{ldUAc*wDfcpw+uL|O2;WQ{ud2; zEU(_hcI3WhSq4O^+LA)Q?0M^-kb)6N;`E13skf@&ge|HGrL5%JG)!r(U1cd5CwB9M zx&OU}|M(b22Dy_LMaCZfM*#k(zd8AtpBFf#>Ul$L-G3A9-{*B=IsW+wq#q3DmCp)4P8s}3xBc^&|NMxBGQm{h|9_ePJ9z+m|Nm+J-}2_ Date: Tue, 2 Feb 2021 14:37:45 +0000 Subject: [PATCH 38/77] fix model size --- doc/doc_ch/FAQ.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/doc_ch/FAQ.md b/doc/doc_ch/FAQ.md index 2026e7a0..2af9006a 100755 --- a/doc/doc_ch/FAQ.md +++ b/doc/doc_ch/FAQ.md @@ -361,13 +361,13 @@ (2)inference模型下载时,如果没有安装wget,可直接点击模型链接或将链接地址复制到浏览器进行下载,并解压放置到相应目录。 #### Q3.1.17:PaddleOCR开源的超轻量模型和通用OCR模型的区别? -**A**:目前PaddleOCR开源了2个中文模型,分别是8.6M超轻量中文模型和通用中文OCR模型。两者对比信息如下: +**A**:目前PaddleOCR开源了2个中文模型,分别是9.4M超轻量中文模型和通用中文OCR模型。两者对比信息如下: - 相同点:两者使用相同的**算法**和**训练数据**; - 不同点:不同之处在于**骨干网络**和**通道参数**,超轻量模型使用MobileNetV3作为骨干网络,通用模型使用Resnet50_vd作为检测模型backbone,Resnet34_vd作为识别模型backbone,具体参数差异可对比两种模型训练的配置文件. |模型|骨干网络|检测训练配置|识别训练配置| |-|-|-|-| -|8.6M超轻量中文OCR模型|MobileNetV3+MobileNetV3|det_mv3_db.yml|rec_chinese_lite_train.yml| +|9.4M超轻量中文OCR模型|MobileNetV3+MobileNetV3|det_mv3_db.yml|rec_chinese_lite_train.yml| |通用中文OCR模型|Resnet50_vd+Resnet34_vd|det_r50_vd_db.yml|rec_chinese_common_train.yml| #### Q3.1.18:如何加入自己的检测算法? From 4489b7dd17ff720cfe21bc532321dca2c7f5856e Mon Sep 17 00:00:00 2001 From: littletomatodonkey Date: Wed, 3 Feb 2021 02:47:46 +0000 Subject: [PATCH 39/77] fix model doc --- doc/doc_ch/models_list.md | 6 +++++- doc/doc_en/models_list_en.md | 4 +++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/doc/doc_ch/models_list.md b/doc/doc_ch/models_list.md index efb75f86..d647d032 100644 --- a/doc/doc_ch/models_list.md +++ b/doc/doc_ch/models_list.md @@ -1,5 +1,9 @@ ## OCR模型列表(V2.0,2021年1月20日更新) -**说明** :2.0版模型和[1.1版模型](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_ch/models_list.md)的主要区别在于动态图训练vs.静态图训练,模型性能上无明显差距。 + +> **说明** +> 1. 2.0版模型和[1.1版模型](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_ch/models_list.md)的主要区别在于动态图训练vs.静态图训练,模型性能上无明显差距。 +> 2. 
本文档提供的是PPOCR自研模型列表,更多基于公开数据集的算法介绍与预训练模型可以参考:[算法概览文档](./algorithm_overview.md)。 + - [一、文本检测模型](#文本检测模型) - [二、文本识别模型](#文本识别模型) diff --git a/doc/doc_en/models_list_en.md b/doc/doc_en/models_list_en.md index 577f2aa5..e0163972 100644 --- a/doc/doc_en/models_list_en.md +++ b/doc/doc_en/models_list_en.md @@ -1,5 +1,7 @@ ## OCR model list(V2.0, updated on 2021.1.20) -**Note** : Compared with [models 1.1](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_en/models_list_en.md), which are trained with static graph programming paradigm, models 2.0 are the dynamic graph trained version and achieve close performance. +> **Note** +> 1. Compared with [models 1.1](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_en/models_list_en.md), which are trained with static graph programming paradigm, models 2.0 are the dynamic graph trained version and achieve close performance. +> 2. All models in this tutorial are all ppocr-series models, for more introduction of algorithms and models based on public dataset, you can refer to [algorithm overview tutorial](./algorithm_overview_en.md). - [1. Text Detection Model](#Detection) - [2. Text Recognition Model](#Recognition) From 248669a81e711e87f95b782042dd4cab3acaf7db Mon Sep 17 00:00:00 2001 From: WenmuZhou Date: Wed, 3 Feb 2021 11:58:24 +0800 Subject: [PATCH 40/77] update rec doc --- doc/doc_ch/recognition.md | 142 ++++++++++++++++++----------------- doc/doc_en/recognition_en.md | 107 ++++++++++++++------------ 2 files changed, 132 insertions(+), 117 deletions(-) diff --git a/doc/doc_ch/recognition.md b/doc/doc_ch/recognition.md index c5f459bd..c2b61a28 100644 --- a/doc/doc_ch/recognition.md +++ b/doc/doc_ch/recognition.md @@ -1,60 +1,90 @@ ## 文字识别 -- [一、数据准备](#数据准备) - - [数据下载](#数据下载) - - [自定义数据集](#自定义数据集) - - [字典](#字典) - - [支持空格](#支持空格) +- [1 数据准备](#数据准备) + - [1.1 自定义数据集](#自定义数据集) + - [1.2 数据下载](#数据下载) + - [1.3 字典](#字典) + - [1.4 支持空格](#支持空格) -- [二、启动训练](#启动训练) - - [1. 数据增强](#数据增强) - - [2. 训练](#训练) - - [3. 小语种](#小语种) +- [2 启动训练](#启动训练) + - [2.1 数据增强](#数据增强) + - [2.2 训练](#训练) + - [2.3 小语种](#小语种) -- [三、评估](#评估) +- [3 评估](#评估) -- [四、预测](#预测) - - [1. 训练引擎预测](#训练引擎预测) +- [4 预测](#预测) + - [4.1 训练引擎预测](#训练引擎预测) -### 数据准备 +### 1. 数据准备 -PaddleOCR 支持两种数据格式: `lmdb` 用于训练公开数据,调试算法; `通用数据` 训练自己的数据: - -请按如下步骤设置数据集: +PaddleOCR 支持两种数据格式: + - `lmdb` 用于训练以lmdb格式存储的数据集; + - `通用数据` 用于训练以文本文件存储的数据集: 训练数据的默认存储路径是 `PaddleOCR/train_data`,如果您的磁盘上已有数据集,只需创建软链接至数据集目录: ``` +# linux and mac os ln -sf /train_data/dataset +# windows +mklink /d /train_data/dataset ``` - -* 数据下载 + +#### 1.1 自定义数据集 +下面以通用数据集为例, 介绍如何准备数据集: -若您本地没有数据集,可以在官网下载 [icdar2015](http://rrc.cvc.uab.es/?ch=4&com=downloads) 数据,用于快速验证。也可以参考[DTRB](https://github.com/clovaai/deep-text-recognition-benchmark#download-lmdb-dataset-for-traininig-and-evaluation-from-here),下载 benchmark 所需的lmdb格式数据集。 +* 训练集 - -* 使用自己数据集 +建议将训练图片放入同一个文件夹,并用一个txt文件(rec_gt_train.txt)记录图片路径和标签,txt文件里的内容如下: -若您希望使用自己的数据进行训练,请参考下文组织您的数据。 - -- 训练集 - -首先请将训练图片放入同一个文件夹(train_images),并用一个txt文件(rec_gt_train.txt)记录图片路径和标签。 - -**注意:** 默认请将图片路径和图片标签用 \t 分割,如用其他方式分割将造成训练报错 +**注意:** txt文件中默认请将图片路径和图片标签用 \t 分割,如用其他方式分割将造成训练报错。 ``` " 图像文件名 图像标注信息 " -train_data/train_0001.jpg 简单可依赖 -train_data/train_0002.jpg 用科技让复杂的世界更简单 +train_data/train/word_001.jpg 简单可依赖 +train_data/train/word_002.jpg 用科技让复杂的世界更简单 +... ``` -PaddleOCR 提供了一份用于训练 icdar2015 数据集的标签文件,通过以下方式下载: + +最终训练集应有如下文件结构: +``` +|-train_data + |- rec_gt_train.txt + |- train + |- word_001.png + |- word_002.jpg + |- word_003.jpg + | ... 
+``` + +- 测试集 + +同训练集类似,测试集也需要提供一个包含所有图片的文件夹(test)和一个rec_gt_test.txt,测试集的结构如下所示: + +``` +|-train_data + |- rec_gt_test.txt + |- test + |- word_001.jpg + |- word_002.jpg + |- word_003.jpg + | ... +``` + + + +1.2 数据下载 + +若您本地没有数据集,可以在官网下载 [icdar2015](http://rrc.cvc.uab.es/?ch=4&com=downloads) 数据,用于快速验证。也可以参考[DTRB](https://github.com/clovaai/deep-text-recognition-benchmark#download-lmdb-dataset-for-traininig-and-evaluation-from-here) ,下载 benchmark 所需的lmdb格式数据集。 + +如果你使用的是icdar2015的公开数据集,PaddleOCR 提供了一份用于训练 icdar2015 数据集的标签文件,通过以下方式下载: ``` # 训练集标签 @@ -70,34 +100,8 @@ PaddleOCR 也提供了数据格式转换脚本,可以将官网 label 转换支 python gen_label.py --mode="rec" --input_path="{path/of/origin/label}" --output_label="rec_gt_label.txt" ``` -最终训练集应有如下文件结构: -``` -|-train_data - |-ic15_data - |- rec_gt_train.txt - |- train - |- word_001.png - |- word_002.jpg - |- word_003.jpg - | ... -``` - -- 测试集 - -同训练集类似,测试集也需要提供一个包含所有图片的文件夹(test)和一个rec_gt_test.txt,测试集的结构如下所示: - -``` -|-train_data - |-ic15_data - |- rec_gt_test.txt - |- test - |- word_001.jpg - |- word_002.jpg - |- word_003.jpg - | ... -``` -- 字典 +1.3 字典 最后需要提供一个字典({word_dict_name}.txt),使模型在训练时,可以将所有出现的字符映射为字典的索引。 @@ -114,6 +118,10 @@ n word_dict.txt 每行有一个单字,将字符与数字索引映射在一起,“and” 将被映射成 [2 5 1] +* 内置字典 + +PaddleOCR内置了一部分字典,可以按需使用。 + `ppocr/utils/ppocr_keys_v1.txt` 是一个包含6623个字符的中文字典 `ppocr/utils/ic15_dict.txt` 是一个包含36个字符的英文字典 @@ -129,7 +137,7 @@ word_dict.txt 每行有一个单字,将字符与数字索引映射在一起, `ppocr/utils/dict/en_dict.txt` 是一个包含63个字符的英文字典 -您可以按需使用。 + 目前的多语言模型仍处在demo阶段,会持续优化模型并补充语种,**非常欢迎您为我们提供其他语言的字典和字体**, 如您愿意可将字典文件提交至 [dict](../../ppocr/utils/dict) 将语料文件提交至[corpus](../../ppocr/utils/corpus),我们会在Repo中感谢您。 @@ -140,13 +148,13 @@ word_dict.txt 每行有一个单字,将字符与数字索引映射在一起, 并将 `character_type` 设置为 `ch`。 -- 添加空格类别 +1.4 添加空格类别 如果希望支持识别"空格"类别, 请将yml文件中的 `use_space_char` 字段设置为 `True`。 -### 启动训练 +### 2. 启动训练 PaddleOCR提供了训练脚本、评估脚本和预测脚本,本节将以 CRNN 识别模型为例: @@ -171,7 +179,7 @@ tar -xf rec_mv3_none_bilstm_ctc_v2.0_train.tar && rm -rf rec_mv3_none_bilstm_ctc python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/rec/rec_icdar15_train.yml ``` -- 数据增强 +#### 2.1 数据增强 PaddleOCR提供了多种数据增强方式,如果您希望在训练时加入扰动,请在配置文件中设置 `distort: true`。 @@ -182,7 +190,7 @@ PaddleOCR提供了多种数据增强方式,如果您希望在训练时加入 *由于OpenCV的兼容性问题,扰动操作暂时只支持Linux* -- 训练 +#### 2.2 训练 PaddleOCR支持训练和评估交替进行, 可以在 `configs/rec/rec_icdar15_train.yml` 中修改 `eval_batch_step` 设置评估频率,默认每500个iter评估一次。评估过程中默认将最佳acc模型,保存为 `output/rec_CRNN/best_accuracy` 。 @@ -268,7 +276,7 @@ Eval: **注意,预测/评估时的配置文件请务必与训练一致。** -- 小语种 +#### 2.3 小语种 PaddleOCR目前已支持26种(除中文外)语种识别,`configs/rec/multi_languages` 路径下提供了一个多语言的配置文件模版: [rec_multi_language_lite_train.yml](../../configs/rec/multi_language/rec_multi_language_lite_train.yml)。 @@ -411,7 +419,7 @@ Eval: ... 
``` -### 评估 +### 3 评估 评估数据集可以通过 `configs/rec/rec_icdar15_train.yml` 修改Eval中的 `label_file_path` 设置。 @@ -421,10 +429,10 @@ python3 -m paddle.distributed.launch --gpus '0' tools/eval.py -c configs/rec/rec ``` -### 预测 +### 4 预测 -* 训练引擎的预测 +#### 4.1 训练引擎的预测 使用 PaddleOCR 训练好的模型,可以通过以下脚本进行快速预测。 diff --git a/doc/doc_en/recognition_en.md b/doc/doc_en/recognition_en.md index 22f89cde..f6c4c105 100644 --- a/doc/doc_en/recognition_en.md +++ b/doc/doc_en/recognition_en.md @@ -1,79 +1,69 @@ ## TEXT RECOGNITION -- [DATA PREPARATION](#DATA_PREPARATION) - - [Dataset Download](#Dataset_download) - - [Costom Dataset](#Costom_Dataset) - - [Dictionary](#Dictionary) - - [Add Space Category](#Add_space_category) +- [1 DATA PREPARATION](#DATA_PREPARATION) + - [1.1 Costom Dataset](#Costom_Dataset) + - [1.2 Dataset Download](#Dataset_download) + - [1.3 Dictionary](#Dictionary) + - [1.4 Add Space Category](#Add_space_category) -- [TRAINING](#TRAINING) - - [Data Augmentation](#Data_Augmentation) - - [Training](#Training) - - [Multi-language](#Multi_language) +- [2 TRAINING](#TRAINING) + - [2.1 Data Augmentation](#Data_Augmentation) + - [2.2 Training](#Training) + - [2.3 Multi-language](#Multi_language) -- [EVALUATION](#EVALUATION) +- [3 EVALUATION](#EVALUATION) -- [PREDICTION](#PREDICTION) - - [Training engine prediction](#Training_engine_prediction) +- [4 PREDICTION](#PREDICTION) + - [4.1 Training engine prediction](#Training_engine_prediction) ### DATA PREPARATION -PaddleOCR supports two data formats: `LMDB` is used to train public data and evaluation algorithms; `general data` is used to train your own data: +PaddleOCR supports two data formats: +- `LMDB` is used to train data sets stored in lmdb format; +- `general data` is used to train data sets stored in text files: Please organize the dataset as follows: The default storage path for training data is `PaddleOCR/train_data`, if you already have a dataset on your disk, just create a soft link to the dataset directory: ``` +# linux and mac os ln -sf /train_data/dataset +# windows +mklink /d /train_data/dataset ``` - -* Dataset download - -If you do not have a dataset locally, you can download it on the official website [icdar2015](http://rrc.cvc.uab.es/?ch=4&com=downloads). Also refer to [DTRB](https://github.com/clovaai/deep-text-recognition-benchmark#download-lmdb-dataset-for-traininig-and-evaluation-from-here),download the lmdb format dataset required for benchmark - -If you want to reproduce the paper indicators of SRN, you need to download offline [augmented data](https://pan.baidu.com/s/1-HSZ-ZVdqBF2HaBZ5pRAKA), extraction code: y3ry. The augmented data is obtained by rotation and perturbation of mjsynth and synthtext. Please unzip the data to {your_path}/PaddleOCR/train_data/data_lmdb_Release/training/path. - -* Use your own dataset: +#### 1.1 Costom dataset If you want to use your own data for training, please refer to the following to organize your data. - Training set -First put the training images in the same folder (train_images), and use a txt file (rec_gt_train.txt) to store the image path and label. +It is recommended to put the training images in the same folder, and use a txt file (rec_gt_train.txt) to store the image path and label. 
The contents of the txt file are as follows: * Note: by default, the image path and image label are split with \t, if you use other methods to split, it will cause training error ``` " Image file name Image annotation " -train_data/train_0001.jpg 简单可依赖 -train_data/train_0002.jpg 用科技让复杂的世界更简单 -``` -PaddleOCR provides label files for training the icdar2015 dataset, which can be downloaded in the following ways: - -``` -# Training set label -wget -P ./train_data/ic15_data https://paddleocr.bj.bcebos.com/dataset/rec_gt_train.txt -# Test Set Label -wget -P ./train_data/ic15_data https://paddleocr.bj.bcebos.com/dataset/rec_gt_test.txt +train_data/train/word_001.jpg 简单可依赖 +train_data/train/word_002.jpg 用科技让复杂的世界更简单 +... ``` The final training set should have the following file structure: ``` |-train_data - |-ic15_data - |- rec_gt_train.txt - |- train - |- word_001.png - |- word_002.jpg - |- word_003.jpg - | ... + |- rec_gt_train.txt + |- train + |- word_001.png + |- word_002.jpg + |- word_003.jpg + | ... ``` - Test set @@ -90,8 +80,25 @@ Similar to the training set, the test set also needs to be provided a folder con |- word_003.jpg | ... ``` + + +#### 1.2 Dataset download + +If you do not have a dataset locally, you can download it on the official website [icdar2015](http://rrc.cvc.uab.es/?ch=4&com=downloads). Also refer to [DTRB](https://github.com/clovaai/deep-text-recognition-benchmark#download-lmdb-dataset-for-traininig-and-evaluation-from-here) ,download the lmdb format dataset required for benchmark + +If you want to reproduce the paper indicators of SRN, you need to download offline [augmented data](https://pan.baidu.com/s/1-HSZ-ZVdqBF2HaBZ5pRAKA), extraction code: y3ry. The augmented data is obtained by rotation and perturbation of mjsynth and synthtext. Please unzip the data to {your_path}/PaddleOCR/train_data/data_lmdb_Release/training/path. + +PaddleOCR provides label files for training the icdar2015 dataset, which can be downloaded in the following ways: + +``` +# Training set label +wget -P ./train_data/ic15_data https://paddleocr.bj.bcebos.com/dataset/rec_gt_train.txt +# Test Set Label +wget -P ./train_data/ic15_data https://paddleocr.bj.bcebos.com/dataset/rec_gt_test.txt +``` + -- Dictionary +#### 1.3 Dictionary Finally, a dictionary ({word_dict_name}.txt) needs to be provided so that when the model is trained, all the characters that appear can be mapped to the dictionary index. @@ -108,6 +115,8 @@ n In `word_dict.txt`, there is a single word in each line, which maps characters and numeric indexes together, e.g "and" will be mapped to [2 5 1] +PaddleOCR has built-in dictionaries, which can be used on demand. + `ppocr/utils/ppocr_keys_v1.txt` is a Chinese dictionary with 6623 characters. `ppocr/utils/ic15_dict.txt` is an English dictionary with 63 characters @@ -123,8 +132,6 @@ In `word_dict.txt`, there is a single word in each line, which maps characters a `ppocr/utils/dict/en_dict.txt` is a English dictionary with 63 characters -You can use it on demand. - The current multi-language model is still in the demo stage and will continue to optimize the model and add languages. **You are very welcome to provide us with dictionaries and fonts in other languages**, If you like, you can submit the dictionary file to [dict](../../ppocr/utils/dict) or corpus file to [corpus](../../ppocr/utils/corpus) and we will thank you in the Repo. 
@@ -136,14 +143,14 @@ To customize the dict file, please modify the `character_dict_path` field in `co If you need to customize dic file, please add character_dict_path field in configs/rec/rec_icdar15_train.yml to point to your dictionary path. And set character_type to ch. -- Add space category +#### 1.4 Add space category If you want to support the recognition of the `space` category, please set the `use_space_char` field in the yml file to `True`. **Note: use_space_char only takes effect when character_type=ch** -### TRAINING +### 2 TRAINING PaddleOCR provides training scripts, evaluation scripts, and prediction scripts. In this section, the CRNN recognition model will be used as an example: @@ -166,7 +173,7 @@ Start training: python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/rec/rec_icdar15_train.yml ``` -- Data Augmentation +#### 2.1 Data Augmentation PaddleOCR provides a variety of data augmentation methods. If you want to add disturbance during training, please set `distort: true` in the configuration file. @@ -175,7 +182,7 @@ The default perturbation methods are: cvtColor, blur, jitter, Gasuss noise, rand Each disturbance method is selected with a 50% probability during the training process. For specific code implementation, please refer to: [img_tools.py](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/ppocr/data/rec/img_tools.py) -- Training +#### 2.2 Training PaddleOCR supports alternating training and evaluation. You can modify `eval_batch_step` in `configs/rec/rec_icdar15_train.yml` to set the evaluation frequency. By default, it is evaluated every 500 iter and the best acc model is saved under `output/rec_CRNN/best_accuracy` during the evaluation process. @@ -264,7 +271,7 @@ Eval: **Note that the configuration file for prediction/evaluation must be consistent with the training.** -- Multi-language +#### 2.3 Multi-language PaddleOCR currently supports 26 (except Chinese) language recognition. A multi-language configuration file template is provided under the path `configs/rec/multi_languages`: [rec_multi_language_lite_train.yml](../../configs/rec/multi_language/rec_multi_language_lite_train.yml)。 @@ -416,7 +423,7 @@ Eval: ``` -### EVALUATION +### 3 EVALUATION The evaluation dataset can be set by modifying the `Eval.dataset.label_file_list` field in the `configs/rec/rec_icdar15_train.yml` file. @@ -426,10 +433,10 @@ python3 -m paddle.distributed.launch --gpus '0' tools/eval.py -c configs/rec/rec ``` -### PREDICTION +### 4 PREDICTION -* Training engine prediction +#### 4.1 Training engine prediction Using the model trained by paddleocr, you can quickly get prediction through the following script. From 94278781c64851db8010d0135740d66941e222d9 Mon Sep 17 00:00:00 2001 From: WenmuZhou Date: Wed, 3 Feb 2021 12:05:20 +0800 Subject: [PATCH 41/77] update rec doc --- doc/doc_ch/recognition.md | 6 ++++-- doc/doc_en/recognition_en.md | 6 ++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/doc/doc_ch/recognition.md b/doc/doc_ch/recognition.md index c2b61a28..80a8fb13 100644 --- a/doc/doc_ch/recognition.md +++ b/doc/doc_ch/recognition.md @@ -48,14 +48,15 @@ mklink /d /train_data/dataset ``` " 图像文件名 图像标注信息 " -train_data/train/word_001.jpg 简单可依赖 -train_data/train/word_002.jpg 用科技让复杂的世界更简单 +train_data/rec/train/word_001.jpg 简单可依赖 +train_data/rec/train/word_002.jpg 用科技让复杂的世界更简单 ... 
``` 最终训练集应有如下文件结构: ``` |-train_data + |-rec |- rec_gt_train.txt |- train |- word_001.png @@ -70,6 +71,7 @@ train_data/train/word_002.jpg 用科技让复杂的世界更简单 ``` |-train_data + |-rec |- rec_gt_test.txt |- test |- word_001.jpg diff --git a/doc/doc_en/recognition_en.md b/doc/doc_en/recognition_en.md index f6c4c105..483df5d7 100644 --- a/doc/doc_en/recognition_en.md +++ b/doc/doc_en/recognition_en.md @@ -49,8 +49,8 @@ It is recommended to put the training images in the same folder, and use a txt f ``` " Image file name Image annotation " -train_data/train/word_001.jpg 简单可依赖 -train_data/train/word_002.jpg 用科技让复杂的世界更简单 +train_data/rec/train/word_001.jpg 简单可依赖 +train_data/rec/train/word_002.jpg 用科技让复杂的世界更简单 ... ``` @@ -58,6 +58,7 @@ The final training set should have the following file structure: ``` |-train_data + |-rec |- rec_gt_train.txt |- train |- word_001.png @@ -72,6 +73,7 @@ Similar to the training set, the test set also needs to be provided a folder con ``` |-train_data + |-rec |-ic15_data |- rec_gt_test.txt |- test From bae07f51b53560aef0667c80b32131c1c61e3cb5 Mon Sep 17 00:00:00 2001 From: WenmuZhou Date: Wed, 3 Feb 2021 12:08:56 +0800 Subject: [PATCH 42/77] update angle cls doc --- doc/doc_ch/angle_class.md | 10 +++++++--- doc/doc_en/angle_class_en.md | 4 ++++ 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/doc/doc_ch/angle_class.md b/doc/doc_ch/angle_class.md index 4d7ff0d7..1f5b4f6d 100644 --- a/doc/doc_ch/angle_class.md +++ b/doc/doc_ch/angle_class.md @@ -1,4 +1,8 @@ ## 文字角度分类 +### 方法介绍 +文字角度分类主要用于图片非0度的场景下,在这种场景下需要对图片里检测到的文本行进行一个转正的操作。在PaddleOCR系统内, +文字检测之后得到的文本行图片经过仿射变换之后送入识别模型,此时只需要对文字进行一个0和180度的角度分类,因此PaddleOCR内置的 +文字角度分类器**只支持了0和180度的分类**。如果想支持更多角度,可以自己修改算法进行支持。 ### 数据准备 @@ -13,7 +17,7 @@ ln -sf /train_data/cls/dataset 请参考下文组织您的数据。 - 训练集 -首先请将训练图片放入同一个文件夹(train_images),并用一个txt文件(cls_gt_train.txt)记录图片路径和标签。 +首先建议将训练图片放入同一个文件夹,并用一个txt文件(cls_gt_train.txt)记录图片路径和标签。 **注意:** 默认请将图片路径和图片标签用 `\t` 分割,如用其他方式分割将造成训练报错 @@ -21,8 +25,8 @@ ln -sf /train_data/cls/dataset ``` " 图像文件名 图像标注信息 " -train/word_001.jpg 0 -train/word_002.jpg 180 +train/cls/train/word_001.jpg 0 +train/cls/train/word_002.jpg 180 ``` 最终训练集应有如下文件结构: diff --git a/doc/doc_en/angle_class_en.md b/doc/doc_en/angle_class_en.md index 8d932870..d78bac9c 100644 --- a/doc/doc_en/angle_class_en.md +++ b/doc/doc_en/angle_class_en.md @@ -1,5 +1,9 @@ ## TEXT ANGLE CLASSIFICATION +### Method introduction +The angle classification is used in the scene where the image is not 0 degrees. In this scene, it is necessary to perform a correction operation on the text line detected in the picture. In the PaddleOCR system, +The text line image obtained after text detection is sent to the recognition model after affine transformation. At this time, only a 0 and 180 degree angle classification of the text is required, so the built-in PaddleOCR text angle classifier **only supports 0 and 180 degree classification**. If you want to support more angles, you can modify the algorithm yourself to support. 
+ ### DATA PREPARATION Please organize the dataset as follows: From 69c85d976ad1cab364f904e0b8de2885d17267ab Mon Sep 17 00:00:00 2001 From: WenmuZhou Date: Wed, 3 Feb 2021 12:20:40 +0800 Subject: [PATCH 43/77] update whl doc --- doc/doc_ch/whl.md | 43 +++++++++++++++++++++++-------------------- doc/doc_en/whl_en.md | 30 ++++++++++++++++-------------- 2 files changed, 39 insertions(+), 34 deletions(-) diff --git a/doc/doc_ch/whl.md b/doc/doc_ch/whl.md index 6b218e31..032d7ae6 100644 --- a/doc/doc_ch/whl.md +++ b/doc/doc_ch/whl.md @@ -1,8 +1,8 @@ # paddleocr package使用说明 -## 快速上手 +## 1 快速上手 -### 安装whl包 +### 1.1 安装whl包 pip安装 ```bash @@ -14,9 +14,12 @@ pip install "paddleocr>=2.0.1" # 推荐使用2.0.1+版本 python3 setup.py bdist_wheel pip3 install dist/paddleocr-x.x.x-py3-none-any.whl # x.x.x是paddleocr的版本号 ``` -### 1. 代码使用 -* 检测+分类+识别全流程 +## 2 使用 +### 2.1 代码使用 +paddleocr whl包会自动下载ppocr轻量级模型作为默认模型,可以根据第3节**自定义模型**进行自定义更换。 + +* 检测+方向分类器+识别全流程 ```python from paddleocr import PaddleOCR, draw_ocr # Paddleocr目前支持中英文、英文、法语、德语、韩语、日语,可以通过修改lang参数进行切换 @@ -84,7 +87,7 @@ im_show.save('result.jpg') -* 分类+识别 +* 方向分类器+识别 ```python from paddleocr import PaddleOCR ocr = PaddleOCR(use_angle_cls=True) # need to run only once to download and load model into memory @@ -143,7 +146,7 @@ for line in result: ['韩国小馆', 0.9907421] ``` -* 单独执行分类 +* 单独执行方向分类器 ```python from paddleocr import PaddleOCR ocr = PaddleOCR(use_angle_cls=True) # need to run only once to download and load model into memory @@ -157,14 +160,14 @@ for line in result: ['0', 0.9999924] ``` -### 通过命令行使用 +### 2.2 通过命令行使用 查看帮助信息 ```bash paddleocr -h ``` -* 检测+分类+识别全流程 +* 检测+方向分类器+识别全流程 ```bash paddleocr --image_dir PaddleOCR/doc/imgs/11.jpg --use_angle_cls true ``` @@ -188,7 +191,7 @@ paddleocr --image_dir PaddleOCR/doc/imgs/11.jpg ...... ``` -* 分类+识别 +* 方向分类器+识别 ```bash paddleocr --image_dir PaddleOCR/doc/imgs_words/ch/word_1.jpg --use_angle_cls true --det false ``` @@ -220,7 +223,7 @@ paddleocr --image_dir PaddleOCR/doc/imgs_words/ch/word_1.jpg --det false ['韩国小馆', 0.9907421] ``` -* 单独执行分类 +* 单独执行方向分类器 ```bash paddleocr --image_dir PaddleOCR/doc/imgs_words/ch/word_1.jpg --use_angle_cls true --det false --rec false ``` @@ -230,11 +233,11 @@ paddleocr --image_dir PaddleOCR/doc/imgs_words/ch/word_1.jpg --use_angle_cls tru ['0', 0.9999924] ``` -## 自定义模型 +## 3 自定义模型 当内置模型无法满足需求时,需要使用到自己训练的模型。 首先,参照[inference.md](./inference.md) 第一节转换将检测、分类和识别模型转换为inference模型,然后按照如下方式使用 -### 代码使用 +### 3.1 代码使用 ```python from paddleocr import PaddleOCR, draw_ocr # 模型路径下必须含有model和params文件 @@ -255,17 +258,17 @@ im_show = Image.fromarray(im_show) im_show.save('result.jpg') ``` -### 通过命令行使用 +### 3.2 通过命令行使用 ```bash paddleocr --image_dir PaddleOCR/doc/imgs/11.jpg --det_model_dir {your_det_model_dir} --rec_model_dir {your_rec_model_dir} --rec_char_dict_path {your_rec_char_dict_path} --cls_model_dir {your_cls_model_dir} --use_angle_cls true ``` -### 使用网络图片或者numpy数组作为输入 +## 4 使用网络图片或者numpy数组作为输入 -1. 网络图片 +### 4.1 网络图片 -代码使用 +- 代码使用 ```python from paddleocr import PaddleOCR, draw_ocr # Paddleocr目前支持中英文、英文、法语、德语、韩语、日语,可以通过修改lang参数进行切换 @@ -286,12 +289,12 @@ im_show = draw_ocr(image, boxes, txts, scores, font_path='/path/to/PaddleOCR/doc im_show = Image.fromarray(im_show) im_show.save('result.jpg') ``` -命令行模式 +- 命令行模式 ```bash paddleocr --image_dir http://n.sinaimg.cn/ent/transform/w630h933/20171222/o111-fypvuqf1838418.jpg --use_angle_cls=true ``` -2. 
numpy数组 +### 4.2 numpy数组 仅通过代码使用时支持numpy数组作为输入 ```python from paddleocr import PaddleOCR, draw_ocr @@ -301,7 +304,7 @@ ocr = PaddleOCR(use_angle_cls=True, lang="ch") # need to run only once to downlo img_path = 'PaddleOCR/doc/imgs/11.jpg' img = cv2.imread(img_path) # img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY), 如果你自己训练的模型支持灰度图,可以将这句话的注释取消 -result = ocr.ocr(img_path, cls=True) +result = ocr.ocr(img, cls=True) for line in result: print(line) @@ -316,7 +319,7 @@ im_show = Image.fromarray(im_show) im_show.save('result.jpg') ``` -## 参数说明 +## 5 参数说明 | 字段 | 说明 | 默认值 | |-------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------| diff --git a/doc/doc_en/whl_en.md b/doc/doc_en/whl_en.md index 1ef14f14..ae4d3492 100644 --- a/doc/doc_en/whl_en.md +++ b/doc/doc_en/whl_en.md @@ -1,7 +1,7 @@ # paddleocr package -## Get started quickly -### install package +## 1 Get started quickly +### 1.1 install package install by pypi ```bash pip install "paddleocr>=2.0.1" # Recommend to use version 2.0.1+ @@ -12,9 +12,11 @@ build own whl package and install python3 setup.py bdist_wheel pip3 install dist/paddleocr-x.x.x-py3-none-any.whl # x.x.x is the version of paddleocr ``` -### 1. Use by code +## 2 Use +### 2.1 Use by code +The paddleocr whl package will automatically download the ppocr lightweight model as the default model, which can be customized and replaced according to the section 3 **Custom Model**. -* detection classification and recognition +* detection angle classification and recognition ```python from paddleocr import PaddleOCR,draw_ocr # Paddleocr supports Chinese, English, French, German, Korean and Japanese. @@ -163,7 +165,7 @@ Output will be a list, each item contains classification result and confidence ['0', 0.99999964] ``` -### Use by command line +### 2.2 Use by command line show help information ```bash @@ -239,11 +241,11 @@ Output will be a list, each item contains classification result and confidence ['0', 0.99999964] ``` -## Use custom model +## 3 Use custom model When the built-in model cannot meet the needs, you need to use your own trained model. First, refer to the first section of [inference_en.md](./inference_en.md) to convert your det and rec model to inference model, and then use it as follows -### 1. Use by code +### 3.1 Use by code ```python from paddleocr import PaddleOCR,draw_ocr @@ -265,17 +267,17 @@ im_show = Image.fromarray(im_show) im_show.save('result.jpg') ``` -### Use by command line +### 3.2 Use by command line ```bash paddleocr --image_dir PaddleOCR/doc/imgs/11.jpg --det_model_dir {your_det_model_dir} --rec_model_dir {your_rec_model_dir} --rec_char_dict_path {your_rec_char_dict_path} --cls_model_dir {your_cls_model_dir} --use_angle_cls true ``` -### Use web images or numpy array as input +## 4 Use web images or numpy array as input -1. 
Web image +### 4.1 Web image -Use by code +- Use by code ```python from paddleocr import PaddleOCR, draw_ocr ocr = PaddleOCR(use_angle_cls=True, lang="ch") # need to run only once to download and load model into memory @@ -294,12 +296,12 @@ im_show = draw_ocr(image, boxes, txts, scores, font_path='/path/to/PaddleOCR/doc im_show = Image.fromarray(im_show) im_show.save('result.jpg') ``` -Use by command line +- Use by command line ```bash paddleocr --image_dir http://n.sinaimg.cn/ent/transform/w630h933/20171222/o111-fypvuqf1838418.jpg --use_angle_cls=true ``` -2. Numpy array +### 4.2 Numpy array Support numpy array as input only when used by code ```python @@ -324,7 +326,7 @@ im_show.save('result.jpg') ``` -## Parameter Description +## 5 Parameter Description | Parameter | Description | Default value | |-------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------| From 44420593e7057d31e6b26387f93a08dee7e8c5de Mon Sep 17 00:00:00 2001 From: tink2123 Date: Wed, 3 Feb 2021 08:26:46 +0000 Subject: [PATCH 44/77] fix typo --- ppocr/data/imaug/label_ops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ppocr/data/imaug/label_ops.py b/ppocr/data/imaug/label_ops.py index 26ac4d81..55870a50 100644 --- a/ppocr/data/imaug/label_ops.py +++ b/ppocr/data/imaug/label_ops.py @@ -255,7 +255,7 @@ class SRNLabelEncode(BaseRecLabelEncode): def __call__(self, data): text = data['label'] text = self.encode(text) - char_num = len(self.character_str) + char_num = len(self.character) if text is None: return None if len(text) > self.max_text_len: From 1617cfdd1e457cd6e33d5ecc8ef3e41ce362ba85 Mon Sep 17 00:00:00 2001 From: WenmuZhou Date: Wed, 3 Feb 2021 19:28:38 +0800 Subject: [PATCH 45/77] update angle doc --- doc/doc_ch/angle_class.md | 4 ++++ doc/doc_en/angle_class_en.md | 3 +++ doc/imgs_results/angle_class_example.jpg | Bin 0 -> 62016 bytes 3 files changed, 7 insertions(+) create mode 100644 doc/imgs_results/angle_class_example.jpg diff --git a/doc/doc_ch/angle_class.md b/doc/doc_ch/angle_class.md index 7e8e9b34..ad25a666 100644 --- a/doc/doc_ch/angle_class.md +++ b/doc/doc_ch/angle_class.md @@ -4,6 +4,10 @@ 文字检测之后得到的文本行图片经过仿射变换之后送入识别模型,此时只需要对文字进行一个0和180度的角度分类,因此PaddleOCR内置的 文字角度分类器**只支持了0和180度的分类**。如果想支持更多角度,可以自己修改算法进行支持。 +0和180度数据样本例子: + +![](../imgs_results/angle_class_example.jpg) + ### 数据准备 请按如下步骤设置数据集: diff --git a/doc/doc_en/angle_class_en.md b/doc/doc_en/angle_class_en.md index 9b807370..0044d85a 100644 --- a/doc/doc_en/angle_class_en.md +++ b/doc/doc_en/angle_class_en.md @@ -4,6 +4,9 @@ The angle classification is used in the scene where the image is not 0 degrees. In this scene, it is necessary to perform a correction operation on the text line detected in the picture. In the PaddleOCR system, The text line image obtained after text detection is sent to the recognition model after affine transformation. At this time, only a 0 and 180 degree angle classification of the text is required, so the built-in PaddleOCR text angle classifier **only supports 0 and 180 degree classification**. If you want to support more angles, you can modify the algorithm yourself to support. 
+Example of 0 and 180 degree data samples: + +![](../imgs_results/angle_class_example.jpg) ### DATA PREPARATION Please organize the dataset as follows: diff --git a/doc/imgs_results/angle_class_example.jpg b/doc/imgs_results/angle_class_example.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8e683be32cdb20e964a7154980d5b1d33d6a8eca GIT binary patch literal 62016 zcmbTdg;!fmv_6cx6I-Ye>=?EiQk zL_7A}aKyDVOIZZV#bT1xE;f*xIQ6sv{tQtpfxgn^+Sc6Gs(MHITB6^$#CiJl1t5?< zviT!4PdAeV&z$pMyT+<0mG>$x+fo&nWLYj0!D}BW_G{n6I;}dg>Dz?6J*sz_G%=r z+s|$TpJv3WBmG?};w-sc^#3sAIMZ`N0{z2o9BEW{h$ z)3u+&vckU4R8wD;XDM!!9<1!A`tTubGa_Z4Ku4H-68Zn#l99%L6L^pM=^Y_CvYUB~ z=PurAu;{Wi-%lj|x39R6jNc+Hy+a`lwKf-KLt!=-TqNM=qOBsF4e5ojeL-gJq5vTq zBTGS8nlQJ`i6k+5AvEMa-eM7I(~JK>-`13zBp-X%uHr@0fJV`oWI9WG3 zh`#L=Pk1R*9wg|G(Sfq@eFyI5jfRXxG4&3Q$o_l8EyF7n;$LAZ@o!Z4b;3R{;;T?i zM;I$_58-YzVua^ek(1j;OR!*(L_sY{E%ePr%sKe#Q*Hi<{qxX}u_3>BquzgDUS8#7 zC?bNK5o;37-SgYRzDjfPVliV;_DF)7030mX+YAW-MKs~9>a1d{@)$E#&@4nzb;Ur= zyt&)ntfSHe??K$o6mPR`f& zLi8=K7=KD`5^u6>23k^`ak#_-)O48B5+qgl|KLr3)e$QRcF%UtxeU`_q55Sq>^3ZS z&fzL{hjqu$ob!+%S74~)&p!N`!kTdhsW8hm{5(vpFkPp;M0M8qKMpYErU4<)l@?wb3B0@_OoX@za?n%^~R+@+rD?T~ROywQDQ_9k`&Cs{3diz9%8 z@vpm{#2nU~!yNu!Og-Kb=|ZSokx&q2oW9D6vV=NSE~7hR3vZUXnfiv7j27L8-jcME z>N$Z~wON8vk5c^7nOT~-t=Wq?&N;2Wqryb^y7&!BzXw=i3Md&p!O=pFCd}nO)AG%fEU@Rp3}6W#CtR?R0#x%xm*^%spo~ z>!aia>|d*RRcs+JX*ee_s6Gj-VqNGtERN$yYqtrWq z7_b-^jXsPyO()V!WkE@#VaZ}S)g05<(p*SuNR<|z5wgm7lRB5NCJIjXOXC%&6zQ}G zt~LMk`O{Gitol2578m;Oid2>9a>mL_CR@5rg{-5{!xTd zBu~WN;gf^uxJY)Ekl*Oh_>!4pmP4(>&B}Uh!4mpPd9{75pQE$gM15slC-j{M7RYda z@&GLCJsOhSE#Bw5Z*|CcW_Rv$dbr!V+PFq|hI`3*eRvJN8$=k~%HZ+gsSp+u zk`uV&7*j6Ne#ghbUM8L$oVRvc@;HK;r$BbP_vF~>}ma+i5OW?d=IS7dOT|{qX>5YqEN-zk=zs?R*IH%U1H7R89UH%P}kK zY8sajmthwVm(#7sP4%rE>{^oFtj4Odz}GAdEYQTwUtfPI?cPnSNIUZ_u{X2b&HLBh z-Cgi91eq{a_)Tv@WW5?8$c4Gj{WhQ*^{Hk}rw~*2rdY`#BwlYAq=?7Q8 z)0&o?R-4H#^84egNk{CBJ%Yyjy`u*QS{j`jEk$uE$S6SgIo7)0R@jE5MzPx=H zR5NvZ=0m1wb#jvmm)Um1m-~DR)t`Tqa8&oyjojwm2hTDKrUWn^PF(ye{dZpla5}5@ zpYzuaTp%80K1`8h$6~v+V7FoSvKtf#wJuG$;eah*dep)B?BrmzJaD2pp5f^{;7ID&QQvQ`?pX5GPIkX*zN&m|g?uHl zv3;!rbYCbiSR*B&BJo*bdna$FTq4HHY5lHQ?!8OxG`OZ(4g57=?%t%$WWm$#)M>Gx z75sSueEMT!W1Xt0v3a?!#evSrY`tUfe(T!nsp}!HLaDXr&+aJl6ta)whvR1Pl$MrI zaZJ&<-m4l!p8RmQFTp*HlVg?B;!5$9p(`470*%kL)ks`pY#8-1U_f#no;a+fr+Hbl zX;bW@dI5^cRa{SBT=tW^^;XXCGn$zYk)Z=vfb!4(Aci%;DqtL~m@FQE)HIJ%kPFM> zzjuQ@v3|BBHK5nT@Ga^QM1c^Jq!wJ{j07 zy(OzPqq4==@z~|q?l5b{?;w4lUU1>D!mO#g!)3?jc5L0Ky3y%|`DWPp!bzq*(PM9J zt*y1-Mt^N^oi{M|JohMXy!XJ2JJ2ynVpQw;{>n=#GSJ=mn*{tpbl*7#6PHexE=6i8 zNatquX+I+GTVA&9MK-=PnG9Wk_GRnAsUGi`+0$kC4V_6Zc5i4UQ{{lEvvv@~sCm-h!V=NE6=t{{9TFdZ}^&)`qG!nwm%)FKw)s z^4AUt?WKkMa#6lqNJuETDF1f{IXoBj|Jq2+|9yznC_;pU^bSc?LH2_;^3fV*TaluN z4BQr%OE1B6=NB54IiZ>Z=c1;D5X4L@_36e54L< zi`gQ7MzT$ZFleTmFYTP11a({-IeU5E@K`V1*L3|mS+%nr&ve`UCnk6yd3w)dsMj+o zpYrbij4}9!W`}JnTFd{lzi2IPu*gZ~|Fx>vm$!z>OzrvVx&Lbt;T5PRDF17RD!CHj z9tzW(|JV7YtN8z~u1++>saaWBqH<);Y`bpbVz%k=6gto1QJJ}_8~v3l&*3;loS)sj|-9DoZxvHM|I@80y!-|Kkq`>92Q(M43!z1i>>zfFD$D3C$IUI-7qX_$3#kj78QY1pBA!u!_+En4vAqe z)rVxymzDhUXs0 zig^zDzsqqq{#|RY7qNMrn-0jM&!;cAkN)#sC9MN>FEy<4LTSeM*pcReqM&>u{^0|i z3dt-wnK9pLYMhfZmSDS9N*6abG9med($Z!>~bf3RvV+mArYZ+V6zGiFxLz(hFWZwi6;cXQ~2i>0^JV5o7Cfgt!OBw%xyP>)6(o=lZ>#^yPuO| zO_|nbd#teY=9GAmi3y&$Ckf8RyI%9!@PIYL@}s`41!K}0#yl6$UKM|Odb;T}n~C$^ zW)u!Mam$ea(E{J}>`C7=r(es=B!5dp3O5&>K<0z6l$Jk*6R{?C=n$YkHD{vU8q3rN6eZ`33_~M1 zD~cbs$&)_pBpaBDahu))Oi(UZkS8WQ*M0WH&Nj|sy{Kv7-!NxIVr3VDo*%=dt``e- zdE4MU={6=z$GlD-s{f7V9Gx3&<5jo%pF7jYwuUYYMD!(mBQwrAJWWChv$`AB3e?de zI_PhvMv^=>aB93F({e?uk=>-L&s8WY4Bm(LihY5`z@GQOrpx((Haod({t>iyJ_eJtY2)byAZ 
zH)-U-?!z+iKkHA1kMedth~pi#->ldBBlf{&tRB-ai(Y1;-hY$0OLG~7VSW`ep(2md zwkwW}U4;xX_&bB}7@X`g|_R{Ic zW?%kc9iy|QaZBl9VL&GQW5V5jzjjJHE_Jg?&l({af9?58x6fz;F%O;9)uQQmJa%3v zWm(XYa%e&`M&D@(hc>tw46+yJaQ0P&wwK77@yGT7y1jH=9C6^fVq)f(dX>I&=HUuw zQs1vV2e;^+Z#88xvch+ha2-Nq?A7cIi6zI|$3N=Qe#AaXMKX*$CU8ddMgkfX}q5LFOsa}VMvfQt7H z40D#3yEW5iHtu=uMhTkFx>3GokpZJ)WH81X>@5%0$F_AOVq|d{rDiY~fq!LEI;nLU zq5P(8nQHFfOSUik{p~5fJXvXCW1R^UKoIUS+PlA!VeG`!87;Ooj?6a@-L{80;zBTj zCmH@$rCx9q(L+DP+ST9Pg$WdUM0JzjFYo7HC8D#-X>l9sk~;q1bX-CVYN^?>?V~sa zpoO*LJF%Xdr{#I+;A;3?6^!q_BEOy;Ok4f1HyImbG~~4hGD__q|1UV+qcmT4Q_5%Q zh9n|~=7@#@usj@xhGpBexFy84#j%Z}T0QW(Ey`B>VB^s13goP3o?uNClxWWicXHU# zZbB+2vkw?$!pYzC8+1VnqR@A#P6w1TrFuZPkOB!@1YEF|-A0O4oWVgYZcSN0f7@1d zbKPG^RugQje4CEui*7%X2#fi5Js%k`^Ctdi?+IPbuo5snhM2XUxgowW#xn3V4qmp9 zc{D0xyJ!EGri)mR2gF++@J2vxc+p;X-phaXd%w65Lyi%Jo{#w=1o`+B;TJ=!zv?*~ zEo#~&!nMU0JSNKfd^`T90y3v%f>0o_nm3MrS*ae3pAa^#2&-F~3f&`=sJt9WB0vR$fyf)7bx4te3VZPmC z;iN#23jn(!DiZ|_07$b#6KY^hwu=^{vvS|#qtiJC{5S9l#^9rC3LnB>6kb4jjKFDc zFzqjL?|>E2*b2dS>>ASGFDuZdj%!j+`3rA%j?M1?_VJ_nBPK-sbl6w9(}kDntV#*x zWr5JhcNAH8C{cySAHLlY;Va800svO*>6_+UI8dk&e0}?+Kne#42k1_aZanymZ~`a~ za~yQ6?2{WTXFg^PHyYLO5N|~jvB2hwKSs4s%4DocbPCPt6dzc9Ln2QA{CSqL_m+I! zSiZ!{yPLJEo1#g2n#C0ft|6Xtxnx*UePJct9kD*oUp#nPF1%WHs^3yCu(p5lvIZA# ziOnx5Y?OW_kcvfLpO|H=32jkY(W=ebZIKg096G}}STTX9JOHN%zJ>koiMYqNN_0Mu zppLA$upx8PPNV&>cxSBc|Eigyx5#E(OIP<{wjr{%Y!vn`8IBDl82nPK_|jVwxr2rR z`FPUuEm!kK3w{WHc3W!ILu$yFDq|SH%JE33PJL=c`n1dbU}vf)LAA02aey%X;ntTy zGC*?z3XjoTAL~)>bY{D>B@kuGTVH?i>wA29`D}Y)rMNs>AqqcjiO?mC<7oeoCq&4b zX)+#==TdaJWYawIXAJ;LrE^G-RpEf`_TysQnoRnGT#kS^KtX5^>O0U+#!80xlWFAl z7wf1f+jFm~Wa2%zQr&!ncqlf_!sduv3F8;N7E*6~6t?+M_HpWpNrM2jj;z^T1Mg~d zIe+_3;*2~a!cZR)$itiz0|CYztaxf zJ+U=ZQI;qKr<(qr-WeUkJ+A>ZNeLPw8jaFXH72FH5 zKP|b8fH?@pzfyi3HUj!{HF9<1c;JDblSt8S5svR)x;P`m&rCL1P2O;iV_G zLoOOQ$H4n9F>gYLKJxkNVC2YRl7vHe+CA7nd_NDq&%+lYcM01DAb6Y9mE;|^fw8nc8>t6|T4 z))sO+8#vG;C#R3p=+;T`Bzbe1^<=dYxm@XQ^ibtJb*pIhT14W9bI|=tuf6HTGSqM5 zejSu^ziR7!Rca=+ded=3!_x!_nG=fJ>cuu2kk>L&(i&OT0m^KNsTI7UrcIK!Crt=6 zpAFv_qPMcOu*p>CX)Zpmy+3I!{p)kvuY@qGK&{s2|bdzRObZ)C!7Ac5te5{M_W zEEg&!D2K|PzrcGx&@|h=<(Ut5{m4lAv-=`0Q za0&^6LK;zbdsJI$41p`qXcWkLy}L+yORFb@OgE?IwbZ~@YvRS_ya#N&-qmT6wC^mB zJ(Vp(1TiR*41-uA_N3Vm6mY=#pS(bUEQ~;vSMm~wGMWmars3WxsWIrXSQP0=uPv#ShIEOJn}7d zGsHCfcVnA1;NqWP_~Xq+koUg2nfOuTMqnr%lt2>{=WR|qNa<#OktTGNwm1t#CfHf? 
zTq!@ldpr&5jLq42Gq3b<3f+|U@pD4Q?c6RPaF{s-kM04286FpSS#Xto!79`2>-_>~ zq$GFw<0xM4(czPsdRu5th~NK%zF>?kcpdm@{H30iG#?uW&HbH9wOO4cN6fL3n<2^D zY=;0Z6nVbP{m!i{rlaj*4}CR~-Nyi)Bj(Y)une;c zo-qPS;4!2-S20JSY3duP_kNVC+!sga4y*hDN{TxV05CNZOVTtHvo^dO=@cwW$C`FF zi*5$Q7>T%jQ&(7_U-lFlrfLlN-t4Vn6vg>Cr`UnO)DP_F`HP(ey{D1S!e);4JU6ni>6zVkK3~9bUG-Ruh?K@UZ=MKn0&M;) z2>Z{Uy{IpM4I1}+8y9Z&v}0DGW{DTT9y63t@MmtiGh-DBwU)@GF6eHeW#E!teONyh6t)$zQ;RF+IYfxllOD z{3op?Z}TbIz`7DbZg-7V-*dHZ>SiTaECF3LRFIgIK-C${3P?N)TE&0ap=ZJ<+9#Ah@ynl%hxD^U_3M14o78m$E&{J+a>ZP~wgi zmq2bRDbrRdp4MyW%hfH~n_@6cn-Nw;lJs@;v&q&fPsi=1uW%8aPdu*VK=kHl_~4L` z5GR#UU+53Z%>N+!?Iu|#K=Lw=kmh>TZ9)xv4Y%XEy^7-6L4WMF8_a0y|EfX1HH-=V zkXl+UqVa&7B2A)sq3%$XLhilGA#{O|YY|roLI7qU{tp;9NH6gUR7h zmmmy9OhG1j4ihu5ybyqPj0o$5b@KNpPebAxUJ=NyF&!-m+?Z8ag3y&SFg8}nolK8s zvqFchsyZ*SPVp-AJeRCfpy#8!l}e6F0)zn##U%fJy3(EoJ>QR3cn!KLy2>`kE)@WY z<%vely;8~ z6`#UZH*9g=2Q{{?l?S$v5KL$oO9dPnZBSOU9Q`_4v7?e+sQk-EK!IF@<7 zkO@x&ikFOUbz{;Q@BK`une_i5ho|)=WdOMgJ1+igc$NGM>2`NvoU6IK{Rl*tbtCL* zu91(5GmdtPxrdWZ{3W~AZ>xsx{={AO>CsEE6u?wLJ!EMI>J@nfn(|f|J@~AQ zc4efU9l`ixX@Wbmdb#JU@O?&2lj#QT$9iU7Jz$nlnDmt@lz5sI0DnuD#@5>%49hT& z?Lsrl40Z(t$4D2M>G;yD=`U;&Xn-^u>Th>QLRsO{eGOW0=Q8($@!%SM1|S|DbHSL{ z>fN|k1%`TBun7CfzZa$>>Z|1AepfDeOixD1T&*g|Xd4bSZ zX+7;H9+k4IwmAVD1UHx6R_ELViQi0pJg^7;p%qqW3fh@ZYpw4?8iVc|4`Wxx?*()2 zG^7-1-2U4(WILWh`{(v*(NV})w0sML2Pm-PE;{hIFn2+>97evO# zsi5FQmd|4_)Okag#XIQxasOPZXzA_@myk-$dUT$EaVrS|_LVEqw)(vr;pgyR zLYiP^%wSV&5IQiDRT6hV)0AQyW&$rI3$fxdX{27CW^uf8aQU&`1>DpOSa}JnKr67l zu$7fnHmX@t0-O*4`BoFU-bqrBY6ASfurH@%_ze^dxXoO+dnY!<&m@QMGIMP=A?`@}l zg((7vbyHrKebes-eBqPM&Sa2&*-hqb(SfR87QHqCIJE})qljp0&1W&e*W@1rCJ8gy z$m6&&({;i-hj{A3^#hjM1jvOJOIlid`xt4pwU{z1AP5|(0VP~9jBuC9ez9CeAexE9 zX`5Ss%2P(?U51CQIkk5*bDZhpu%ZbSgz(rg;M`8jhmzg5$Lm~2HsA}6#uqiC=?2>8 z-6*HT7$0|TJ6(?<`tT0{^GYWP8T?nHfIND)B$qC%<(NFFs)ON96Wrir7LC{h*X2S` z>%T_~ZndvD?y)J~LspgI;ZN%k^Jf7{rY@Yg?m zeEO4lJ=?gmT{zqbDhXG2@R0PNV0{!AWS=k)*V{AS6gXq)+vXG;51Hw1UB9YmVPp_$ zH6?sck-c#nvq5eO4~it1TUjs1*=j>dzf=MJwmRoiEQC?&u4UVi_2?%Y#mxlj|K9x zaXn!b(P{iTBKqdhtj=EJn zymVUPAr%KGv1$ZL-`34YC$DVlZ&c=S7EI`$Ybx6^Um$n_8=rMLIQ8%-LtFsoDzkaq4+L z2y{6091#)X3v&Dzu&W-hVqhd^o`lM(N9R91nIL{k9Lo8qlRe@7cidts-z8yU95Fex zdleb{%3PA~STj3{r{#}n5Fb&4)$!JG{{`wZrC<|%26Zo;4UPq zi4(<{-zOB;m>Mk*?J}PcFguJp092t34Cn$8AwIc>oT2l5`7q!|6|&dDZW#8MZaLJ* zZ4i2oXPfEb>$2%pIw5AM5N-V=sS90fr#0RX95_TGX@xCdo4?4oMnaJb|ypUsoEG#U&w+bk>l7*ll zTT$N;cJK$lNF^aOGA&mlJ)-88obz$TL@GU+5&fmup!ZM}OxV}OW(&FmC+XDLj_RzOd8dj^B4Pw-(@jGzokD&Pn+vwVrKdl60f7vfo5$S;F& zfa=^wbJ)Ds{c&R@A8h-Z7|N7K_#QRh;|uXQBF?&t6gXAOLdUoLdbkI8`(J$>7WB9f zWK#RBoB6x`R=#2vCS^!tp@fg!dAD$m9VfVXK(-S5af>vV6N=9Js_6n2NGiy2Y*5aB@Z zpRL$?FbvaP zGH)Myg^^Tm!J|>LC)c$$2t}v+=!IZx^1tW(oOM9k-I#L)E4E|7c=y0!^gz{b{R!pI z5j<9^@>Bp>6ROFTk&n{+p#8+=-(d!iV=f~Iju|rNxh1wdMJZYZpq!c~C<=~-DXjzU zi1`wtl|}Taumrcw695fK?8wJ26L-x2mJJagV~{-f4iT!fL>$_hLaHlCiV%q|46A)fc1_|c7+gV|DifbaQIH<1v3X(-L9x3V6C z*Ib^!8IZ*{?_BwK?G~IA?ch9{$7@s=~~3& zNae#wdW1YL<+5U{^j)uQPyXw3ScA8ixVDM8hKS)Ux2Ln+lJ@g*<>ROF#;B*$=cm(= zl<@jI#b>tUHX>skHSQ5GjijPkn|Ju~LMnEGJPhYi`y3L_W8CCv8@dZ{0!vy4)Dn>k z#mWjf?;_!~L561cEgIK6OQL8s54fpud%Zt$kwz$TbR~RKmhDXFsF!S%#0X4hPhXA~ z=omJyY`^~NFX;Wn+8EBmu_Y(~s>xqTu*>F#p#qtr9+LZ1M=*QBkb^|d$5zBB2Q`8J`tI*`0a5~3+gPqV6u5z)qe~KjDSE@$7bIf5U`m~_0%5g*w`y}FXm;>TW_5q4+QGo%L zGi5XZb=JoRs4VC6q-oZY`~O9hV}~-wEx!U0rvY(p)}Ckot#kb@g7Q6 zYdmCAvg5XYVj=6dpNrBWYx2|=OAS|+-}#>{8UE_i&j#z+3S^lArSG;|3@=8G8dhQ# z(3fLg?F5qrQDX(Z8~_a(?CsT}F+9@{2)c{FE;Y3B~e*g5R-N&H{CX1tGwS`pRKlDJ~nDJ=Y) z*6&FF(hvL$Md)WSW<|>S?;4~SAJG%GH^nQ6k|L*C&^Vv^Ib}{B2k8rIz%*J-% zQA?3^8t2Cufu>R#= zNfJN4CVWw7nt~vVwu)jNsGi}ELa76c{Ec=Tj7`wO%m#MiA@|r-T|NMJz=-Q?+tu4Y 
z#cFNbTR-hg#j$KD5e{*P6U6a%eJAj^cvEyh&6kYM73YG|y#0&L0F^-tn;cuhPR?cT z?|5v;JAaeDmqW@)=fX05`|e#=INrfE#Cm-Cr?`aLt7hGeuP5$gtZN>BtQu zbEG!s@d%Nn0Gnoc!=}ih{31S|Vg)9dw z0}RY~{h_#})6{{t&?|f%0?X&e(~mp07T7RZD$~L|kGu1FWmnK^O{zjXGjGFY*JB{t z6=V;Gal<}t0j5-1ef%N7d`gwUjWjKOhgJG4LU`dj|M%^_DckYw#ucY~VaeP#(MV+N zvIUY^L*v@*l7?0Vf>!D)&FN8xy37Sb@0N$Wp&`b@ADB3U zjitPD4L-j+x$PG#-B578$}}^{tpvte_RGA<7nIP+?#$X@-t_WY&34SU z(0JS&9L|Rgr%ERy<0gr-yCUsE8D#@tqDtPO@=`5|^Z=9s9*_<)f&Zq_-#8J#i&l;)E<6rOQK#<1Ugw6+rV=T2cT6;u9}MZ zH7oWMPw-oxPi8!O!fKVLccu)c3$-Y*U7I9U-dsr=_w5^0(l2M$gsee3>b>F855b5v zRnlF9PQ={uO(Ac9esi|30?h<~g(wD_rHe@mgKt?OfbDxC4j6P)J*XcJCLA2_nJP!e zRYu2t25}jQ7~P7SH(e@|Ei~40`9@hcu6a>huiD?p-K2!ywCzdl;3wC69lh2U@+6HR zL1AX(#i6i%nzBy8<1`qX&k8iZazp%TXak-XXI}rnRTk^AcK|c`urgH=;KDORW7peK z1zFo(L<9Yme5Zi@h?+JR0EP91LIa3Nzo+OH zbBx01FIPT(`-?h-^>87MZ}n1+Dh=?)kmQ!=A3f?g#Vsvg2D$G9eLSO7!Y6qIf7$2V zTEg}7i@%)UfXk^tyn%E5RA>0*nni~PQlKlNkvmRaJ*ob)Y^_Sga$!gd+9RWvN3h@^ z3PRyf-Dq2$Wn`XHI_s#r{i@u#92{oQw)w_GP`R?4u`NVda2?&Jjp;kt&bFYbIYb33 zTEO_04--Z-S!{}Re11!aC+(;+1rsXP%z?~1twZ!ri+XJJ%IvQQ{T6%Yhk|(^nr2la zyQ(HI0-5f(>=>GPHTVC3HC?a;&lH;BE!ys_$|PxfL<5Aq(=(F(8$I4YpWM`jDoS)> z@F8y0<_o7*KCB8Y0uwrx|{5tMfyw~#Fzvo7z+v9F$%MIBoPw(FeN2Ig= zl^ly2#h9lkt<#EXt3d8i^q-$(1`-S7znvS}8rX(L?8A5Z+4+{ENPqtb*jMb8p}n5y zt=ZY(RA^YWS@c)E6LKi&JUUkoPgdlWYOC*BqtX?skrVuppkr?408_)MJOF&G6l?M9 z3U9SP?~j6g)#AvNxl{dtoAY|ENyp=M1clj4Emq`M+bL?@*ru@(Ke{?P!U?#xXY?W+ zSrG3WXnflRk^g|Xo{ESs_g(l2{286K=H=O!*DN4fC__O~9zpTR=(eW$jFLB8P5M5G zjNit_2B)(&Iq)F@*AZuY^DpO{42IT{q|w$c@a{i_ z2jBCR*|V@W?%|7yLC?b*roqQU8N*GO^8pJ#@ow4h={qSX!cN})R)H=%X;}R6PsH-1 z>`j}HdE!>Nx?pFpyRWQ?xEZ1dLfAW_M5THpC(0#=I*^=Z?_B~Gk7o|Q61#t2ydJB zwG?MznJK(&Xizt9@1h%&MaZ)2DDzdap6jN{8MerU$ar4Ps&To0pe}}OT1j& zjzEbMK*w?!%>=~Ju4tcjPm<3!k(1mPLc(TvPLme=Bz3C(5!&uzR?`m|{38!_# z5RxjAJOR@f%%B0x0;kNwX@RgV&2!mzSWgYcP>$v2`&nj$MeA0}FJS?5#5LKs{Pi#Q|`Uydew~K79N!A~h z&yuQLcbX23>c3`)|Gqb0;NU07L+1f@>CLgq+(jI0w@rQKI?ttsy<-1GbGx#BeLZ*S z!^4kve^l!wtb&K!91Z;H?yoIE^Gl0Tx__PKn9$w%X3eXd6`86ebX2hRWyHgJ z;4!Hz2FA+TPlTf>ab_M`dx~3P{5@O8%v3A}IY4exVd>4EjJW?cIRwFWw<$a(pY9Wm zn@k4@cc8&1Snj87uD>2H!7?VIj1|uRi6rNRf6{KHnPo6P!bzUaetPnmPZe{!^hYlF zrk~CKA_2!_ZWx`3y$$F}z|VxUtzbb-T1AkL^Uq=u#U@#DgnZhj^e(*L`6>4s&sel z2fN4mK{Sd*2HhQ_Gx)KooCro!Uqrr|{np{1Lkcul@XOeodw#{$=6L@jd8 zU~$l3nxv@S#pD`;BeJK2NHcmOkA`}2Qny{oUd z>usSKqELL{R-yB*&Z4H2#WsV%Ko&@Q5IO@09DEUy-s(?zZMb+ea#a3fN96>~^9J3G z9`dB{pPOgO8nt=PIvRwEh;EajMw#08KICJ@RRFW*{T*BH1Vl5h$T~olYk2W8hOA$N zMnb1jc$7t8x~!kaW6HCQ9llA*oVD##^*)tkrvsE0MqG$L+eK@e+SB|!o3zL>>I|+7 zEvMz(Dk8uWpZtm8iS6W1;}4%cXgfYh;ihuN?jk2E7m=U;Q|eIT>X)}>{3!|+-C>k? 
z%VJgY_>zRKmQPJi#oG>Q4&NQF?en&&_Ae3&Fe?N0IA5}ddB}T>kpAORZmjq2u5Y;v zR2U|XiwBCA0uGdi7`jO~poUD*g(L$$)~xbN?Zl;o1>1%r-X?BiS|dh+gTEt0^R{}K z3Du7M^P6CsWsrhzH%)_2S5NUhIImgReKqu7w~Lg8X{U$;z*7PbVr*RtxRSJ`m`S&- zs8J?d-ZMlkugGMk^vm0nPiAbUNQw9nS^_vl2e2cJqBUjr()a+&XXLa6F%+ zo8FQ6&0acoc5eQp^;FDTB34!aLu!`yTof2xCGKW3c<4kU8R&p#oRBW>YeNf&>wH}4 zEWbP?miCZ5%hF%sLp;{Q=PAjlD^N>AxPjq zbD7(1VIW`^tDk z2SED>8zeA=N^M1-{??s9w+7b)bPrd*hifm@mhzK|quu($`L=AMCz-}>>w6Dm4iw=b zLJu4nqQErjW3_+{V(+%Rg$n{7HwRiVKjh93eB=V+(9hMl9|84M2*Nf{yn3|CwvE$N z7=dJz8}!TBm@+LsOh`nmf0#o3Wk(idr-ezjd!A~ThSLRluLTU`LQo0d?$oCh3hlIx z_~i0Py@sk{gscu2{{9`p;*{e_7uBW!Oo7I;npUspID*U1|ArDY1ToUbg2g7nh4)1b z1J1bL%<^YS4-Ma1KYdDzC-a|Tck~96e4GG{C+X(VZJ$RUhT!sWRW^6zwMAUzLI0DU zN-0iJ`FFp`9MIMB^nNSj2#*MHrGkB3-Zo*T*ix2h`O`7$;_nl-yF;3xUUNCZ4L)He z5_c-VW<{LEg~N9*x4tg2kB5Hgwp)%EwV5p%kBfnBS<&4q)LR99i)K7ik3E~ZsCq@}e#DAT%26k>| z*tJy=qu$acCg?Lv`rSj)HeB~kGgs^DCoj-@08UY*YJ{9Bdo59aBYetUyNTu9%}n-n z9YS8tTQ;NGYW9)@`YI*?tFpSjbY=p@OJ=yb1=pj+;`JoWG6Y*{pOEY+398oKhGgpJ ztqay{CtrO{Lya(b1-;PCL44@n8(&d>Z8p_fCz{E-2k z;kG~cu|xe$d1O-CndraHrelGRUBgZ%mqJbwlYz+IG=-WGTkBX=&m)X6*Sd z&`4>U(6{#O*Ru?}x>rWfp3@ksEWJ@}x4Lp)JT5q!&CNeDPuhz}3A#(UqOY1p*iKExz6bnnR#eowJ9 z6!afGv&}YYn4w}igDMbqA1_b)kqkR8?RC@+X*3KD68!K8)LZZ$U=&$gB{>6mS*7RF^*|8U-KMNXQ%PjCjKn!m# zz6OKo_sh~BbJl?GlL@W|kZp})EN;^Atk9m6)1M5sZW&MaN#T9BZwVc@G4vw|s=Qie zXgIYhd7Wn$=fq2(qYTQ7#)$uqwzmpu>-(d=ahFou-HN+IfdZvKDA3{rTD-UxcPmbD zcPQ>|2@>2L3WVYwtU!=H`Tgg4uHNf+a+zc%lQU$KIlgy~G~4KTTN2 zJ9SYg%E9a6XecG@-Ua$slk%_i(8aatrn&R_pv1d11PEf@kNO>#2Oi%nD)ae)aBJX} z1aX|21etvNV{3C$uhyi+Q~QfF;AJbD_40^#Oslaozx0Y=8r5OKDE&xBtwndd*;NIxM`Z3S5IAMr5EN849Qh05Zs zzxSNBYd{W8!Xi@De9<) z#oCpdA6Pg-8kT^R;GqViw;{6I1KkBmmAQ0?U=)e z{sZZm0hrWfJ?6b@c)V3TitK3c^|f)h#o)QB4yWggYysqI_4^lzNm2>_K}L#)B4;sS2%|P5kh%fP-XVJ(RL(e z|5(H!94=ImGL3Sx%_QZAV|QSdenF{BQ^1GQEL7s0qso@yQm>EhVUR%<-B4mNJhF*s z6jemm)|v!Co2$lAlC8c}w*3ODHi4SnFR*``*Q+$yY{Jj)YagI(<&4?;k*X^t6+FrqC3o z+hC-=+HDUbTb`+r6^dPRD(SiZBKY{i-@6SKU%N!r zrR0G`XpVu_F0;+4S#OHy>AF=zneSm_#t`k+!X1X{@SX8nJQJ7bmZcn7%C8j~9JGx~ zH{qBL3Rg*%wGkBdbyES?+2xD0U6UmpE~wC|`J*zUAgL|yGFg?Kc0{*nr}tuveZ$}L z-`R`V^|QpKsiC9Lx!!J{W}ziV8!h=~3h@c)2C-uZDBfv8~JEr@AiZAneCv38M3 zS>YTPmU%7{uuTw*&8kxmjEf6NLj(!J3sSl}3H!XI210dWX0(0_gJG|2M^%^b$Z=Am5;zs3S(B`4tuvJjt~>e; zf56SmHqwhAt1_AQdKP*4nIkZ>z51g{ItEm3CK2e$hAX`05$uMqO1O*e2PN9lW^`8@ zT3|Kh+JDg84d)d!KU822SSVb65{&lJR_VK8HNUAOrW7aG9%1%Jhs-Ww^(zu=tF7jo z(EBfv^HMYKWD>%5KO9`Y#*&5?x|H|}XCx2GnV(0H=$b=FOTg@;v4mtaUin6ijx_@GetRsgl?tdvg?y4TrF}sMu-rx+QvYUU znMdzkX2J@+&o-8IGj^L-+@J{JIOiP^A8bNjrHk1eHbuGX_`oxpgV-B+=Dg~_#)n;Y zIfLbS`X?~`C#j($m*WnC@6_8VqY;$mz5NThsLEI)yPaKZBvN<(CFkm-C{2gYf?KRH zd%a?2zr3X_6~~RvH4(MEHb7Ts3aw{ma{r)tcm<$m8v<;Ga@V!I{*@egK;?OJ&Zkk< zMgSS5k_%Eu;z*NBrbiz_9kMHygs}m8Ho~AdBQn3Hb-dWXloEyA@Wr~?x-l0f5+W1G zHpgWYow>HG--yvW1(G>&65~n&Ja+5y8o*3mc#Lc`_sB_dN1~=bJc~Joe9IY8P?lAj zQ3-7P-puYB^GT+6biG^foUA(XiVSvo>a`K1Ic%rrOSz@caWgl5T#M~EH55NRMJX4{ z@`A=m(LIDTUWI!$H@Iet6Uam^s7%oOH;nBgfVGH}QV3B=QjOm| zhPgCy-M(^xiaU9ZhrRii7nAHlY#y^&)Vkhn%}ok;WW>xi%kHkZqLt8Sy#Dx-w0|?% zNah!7;u}I*uq_RkG`?l@WyV&@UlOrHJ6ajt8OIpbPkQJ$$TNxSC8k1kN**qw#FY!4 zyl%}=2Z18V?5O?kb-(vxvwxD>qr}|oX&?BKNbMue#;)QPO{e$qe=ZtU+!uDV=&yya zUt<(^K!G98GQl|}_c-7^$SQ~eRz1o6aep$5Z6+WPE<1ieilI?v( z%D(&IavMYKeAkZPlEw$N-MsrPZeFRYb*857lGEE009UDnxL>)hHQBYQLWRKt;mYn5 zV!}J90a`&^ZhDAYmQWIw(UCw`gGj&s-pt*P`F|_`mq+** zmw%}axQ4!`Tqibs`RkeU=)!HKz^SxEIHH=jnh>-ZOHY-o8+28k%C0Xx!+e|M ze}l;FMXxoo-4?PhRh!h||J1bc%}t9BStj2yD+Oy19FK_9*`nq~Vo)Zb_8hOvzquEFT{EthJ-wmx7}H++D75e* zHG@D=Lc*n*C^~nXBXf1vFXRp^t8s`z{UYY>@r=}e4{hXPwv4w;6uXNCH)s~+lRJ*R 
z4__7w)-7K_W%oWoV^+~3KA54ktP5mV^s~B)Qwk89YkQfZc&ay1?@?fpN-KFx?tqsN zPYo~cO;|NtnN)gBiTI6!q7fv90iBu^li4X)d~RJr?hg@Wde;(a)$FxTju>h8UT9>b z^sc-I%}udUG#}?`B5}^x6-2?hNNQ5k?~nOjimP#W@t_5ug9qqT%8{H&OC5^K=AKO6 zJ@(`9yTbuj0iq}y^3AXN+YbW3x=5Z0ClyC)3RpC9w_45|t?gnyWSJtT`+#>i{qbve zMI^DN&1-Ht$oa*~`9&r3ZF2D;J9Rw-O#N`IsEhJoLtaifLnkv>ES<>@yx(~b&cKzP zp9R^fe0P3FcMOWzpHx)G5IwWl?wq4pc{|H&H}qL~cH|9TmtCW_wXxe5U@WOL0v|G3 zUM+)6Ah<|Qa5Cc;#avhHl3tE!cGPX{hi>;H2Xe|UT8qippT_pVoF(^QU`db@4U3c+DW}o(hg==9P9?`4z$YoeoBO}K|{`;ThGc3XnQ7|Xd_DZDUV)U*EfWI$} zy-H!j>2W`rlZ+r9f86>9_Cfp0NP#0H%VOO15&EhGhJ{F-7a#t2UeQw&EzLov((cOY zL%yVwhM7deg+72uM=9W6GUlwMkiX(Cvx_s@|5wR}j^ihh`R<~{*6VM;ez6?9d{MU_ z7+r?b+#^a-rTLke6+1&KRbq}7kV9XaBOlOh%Fs!rfOIi2UQbe17g~E6?I9;{6l_bT z0SL~s?HK>dX%WFlxC%}p&Mc6bB0XE15bz&r6UD#-T=jPq0Anww>gC-R7OogVvs7hp$?aQ{R6tx`U`9En_%9-p^nLbZ2Dy{@!t1RGtaE z{^1CBC_l$QnatDZd^41tZjtj%0<-VvENenKNrHM8fO|dwj8%f{F9m4mT_=raGF2$zQJzJh~jZCkKr#op9) z>OQ>rv)Ep|0v~U4UzI4W!)`tFKO7?1Opobsx+zO*97y~KJC-o}zx+XS3}Vv{asQ3*4HZp*MDcUdvq(BkcPDSwC-ur}1> zPfG(2Eku!kinm`lr&}nH*y-IM9Q?fdxz#Xrwrz6L`=m;@M$4cZw>M_ZFpadeGhxc| zrs)TuR_x_)&_rZ%k+)*9w;8mW_G(Tu5BLxPbvdi)X=Ewa?Hidh{*@fESq=C=zb!rm5_nbIrm`|MglS6zThJh^HjPte+9zWH+J~?RiAY zv@r?Ci&#O-2kmU_o$E*oXNSOJ%59`1gQA~QgF-&bM!kxeCE70X zWaC_{d)ukfWz}t3YB`tX9{OIceTV`(8I9ahumMiNg{v6o0k^p;OHNkZR4wB_EU=g5 zb;`L7pJ5iUn&WV~=Vo5IoFOt;q4X(VT};;`T9-#OT&63!xMC;?ZVHt~^b!5D=!xd8 z%2CD2W(g6k5kxAzS5iYd36*a974suEMa4Y2@gc)Bf(cV4OLGl6PtgI?uw%KQP2X#S zo{zsmf8RFnb5jxSnNFH1mTKHv%1(1c?gMN?^4+oR+=gl9UoSXd9#n>)I zXdRHE=seD$h^B=qo6r!p?`(fao0@3dZhQc{0PpEL{lkWP>#(4Qk}= zAp485Ns~Lij3_xx(F=t9#?6)goK)}r3RIX^Ee@(_T`L@B%VPb zhgUP2zl%HypRe&OH&>?2ab1e@I~?myV(m;>FPMm;H?4hpR9QtY<=sX_N7HpKPffS| zR!$n?;ZsC>2uk*}qlptzl3QV~7bQCs5R=$*!MI|g8liZmG1#~Kxs-OEo!xitd9wet zEBGaYTs<~OKKSE6HOT-Ok^HN3Tp+9np55*l-_7BqfgDOmAbu-CbmhEadDYEE%qxHk zpobmT_IO&l;9#cA51CX#InA7hIhZ9p;H`{LzwEBZymZ(*mpEUs%29co1qk&Ypypkr zl&6Y`1NgQ)AO0%LSNhzk9u1rGl+vXJdbav|q|z*()5Izb%Kl3V8b)v`s^;Xem0}E| ze(!;|`I;-6uG_FiwWWrygD`16ksUKGel(idx`(uVd;D+p?^JB5+CqRHj!`bB+MF7= zEm8^DH`=Bu1V(6N%c+#-e~ga4<Sy%_tj! zc@0PG1Ff2?M^)clTNFJb;{v;rxtQPlRNKe6z*E1$=k7JJ^@*aOi1^vHr6Eu*6UV=L!gqX}x<3>J?hvf60I0B&vV~oh%DaP9E-nz37CWhbXPK

_`QpAf4`n7$@f8)S)$#MIX-B2tZAe5T&bA(F7H2- z%I%*j&VCv!$zCt9&vz#hOXb%&Zm5b*&We>@{=N()b%>8yIrE%TG;Rg=P@q16#(^bkC z`LLl8Lw1V+s0AhtdGTvS2O8ldflSJL`_d5MPHisTf2z5jHymn8da(zom~DqHJa$t@ zjT-pyiG*ZviCD;ExgF&5!En@!wZ`Ka+Ojh$NqBh;}0+o}>zsHSf7kN-Yny8$(>N%xt@j3Vl4rvvKY{=ALE?|m-3&Z+ zTs9U)l!A|V#m>$hTSBU`BF8ub)|l!nD_*|NWSNp)8oF#W)lVg6v#%|g{g7i{zS>|A z8?v*rJ+aihAInccb@D@|^Kkya<2tNL*j^Hl4^mWj@MKNFyeMR~g#{5SI&rD(1QRBh zy<^-dCop#Y9CW3Un<#KLLLr0!iJVVE`1JXl_x=4knlh#5wcP`b1J5@)otXn zX-h@Bx8=?%n(X3Tiso-jogL(@a<0^XS3f(yFlY*H|6PEZXRAQ87YyfjXa{=pGfBqFil-PHEl(5j zQO`THb||`p5Z3NUVZ*u6vz|r8tkp60!+sAC`|ZD$+yBi88{?|0bmCgO`SZhm1<4Nt z{_!sDzhHk=U2o;#)Y?2XaPWT))E6f8mgu7Rx7gtfyVw!+oHY7%xq^8l1Uag)MQCj# z)h}z?{FhfXC_y@Tv2bOHW^kyeu!-qE7)iIo+Ws~%XY_(XD0u=&CbEazg@_(~R#Wlc z9^NEUV~30wwzI=o6<_K_+=^sP6R-CY?Wp4v7LFW(Pxml*L+%bXJB=wlzz4};_+XuF z^tJ4=if0QDY^? zZAwZDEME3dSVSaD;Q*Y3Y4Y7?kZ|L!C?Z$nnS4~q?4x}S6sj&=+|VFMW=@Lw!OD3c z5@37CTiM9=%gNkphCP@!!!+1MN7UL&DW&3Fbi#917(0e~;CQXCZ|dGI zkJAAZ_tjafeWnXnt0fr^nQvnU%Z8OB&V3sc1DcI+$4Bp)J|A4qVMr7%AC20g$Ry<_ zqvbM~OIvTwL`5Pl&3A$fTV{whM7S`DEyY=#4E42=7&aP>_!-*=9KvAMj}cg}{KB$4 zVG3{qo1=W;W^_^ibzuh>F1##oZlRc%=a`43`NYZoW-U|ajU42UihcK<`SSK;0kkeu z{mFJ%UFY~`1jZhkdASL2LK+R28zg%j5`fHyhadjAuQ`WFTue$oDYW>IEJp0tm5Cgj z*Yx}vZxeN^k-L^Y)%T$3SY)K#OSwFO*W5yjI@CTE{D2j#3Z(McE2kA~OLB34MZ3)` z81KS7VE_8Yngf=_rNfRxaML^ws42Ia3N9>=N+PF_vQ++g{kvTH#LQk$mK}8y@VgTS zA3{7S(PCps-t*S?zOX#l_GCsX007R}3r(@RWd$(rY_De2hCN~Yur`RKgIB|~AqYal zOOcq=67j3y_j0l5DmJC$UXhqvm~{6bSBJZdZ@}MOX!k?5qfu2=bOYhA1^cSQ9s!e102xiR5CAEWH!BqiLU!B1q~RYV?zbaQX&pYMC(3e`YmUvu=7+z|gKCv0yu~&i_KF#$ zCQ2#B2QzB`eXmqQ`TW_B98SQ0G%EvbP>T`t39JA&68*DSTEiajHed-CZJ6$H^r{R3 zwA&y2+VJakymlp3p%#a-*7HR{=n2v!)bvOSeZRmPs1^~MKqkO=J6ktG>2#B-#pL8H zs&G8e<_W_a9U?b1%%5LT9neg(VB)76Tf#G$)67okreqYSq~4*#lsQ(YBWIg=<&zwN zNWadqSk*Q;*P%WhNV9=bS9C;DuI9BI3qlnQiC-`;EG$1!(|toKMUa<(aN zuGlCi`*=SS>nP(s6jE~MT*Y>$8yYXKrW7F4_{^NX$sks4BOG#>qtkVp12_RN)+A8t zCc6H*YV5i1u$--;QbETnK*~td0BWx+gi7}E1NMvGGpN{h0g9cdxe(h3tl6+6v!Ack zBi`PVtPN^=!q`|D$H>AL_`I-qykS?F_Z)FP;)*KJZQ_)caJuJ%EHsZ`?D!Ix%{0ZB znNY*=-sPKw@-)*ih1u>MO#>yy`%)f_!ZwFG+AKL+yUXThLqD^YQrfd*Tq7fAC4$Xf zCKV~3yB%Qo?K+|C^kvp^cPsFR#U_MyD|oTqz$Vj8JdPEuu3-j~&9#)8EG6;puH7Eb zO?j%2VXD%#TEZn;Tra1Jn9<5a$ALuNPJ3m&6an7q480d@8c9I6Ru)3Lzef0Fj+H+) z4oS?QKU}%N#-2wn==LdC5s5HTu-6r5Y;BS4Tk@Y}{X!AR0y*OxS@dxw-9{30~OzcY!gA9ywf!Q4T! zv%j@*xn*|>#iaKU+yZ`IoEB;Dagzt;AQw-&BonIu$N=0x7%Yu}!U6U(SU|za(gJb% zvLRt1SR#Ow$=`LmZWKdt5?bjDSM6T$>eZMWD>y;`tJ*I%Cw7WWIh&)E` zUq%Y!rEudl9O1pj=k?>ax+e``#+ahE1IDoMO;ew;jXS7pFbZWjN9`WmW%bbRY-ZU* znfR{0WX$ZsnFPayA{x-P88_qIFik4 zriNa2)WAS49yv#+>7cka5^8pXO1LBrZO&9I$1RVOR@9l6&++{is}g_&L}_Dam>S(kmdfMRv5lM$vYi<$m-?VMb?dMhDeJ zFtW9R8TpIbeJyr0E-V%iM zGx8+p9Z!us$5W=VptcMM%<_#83WTce4cH6M<5-tzn=R58LcI zVc{vIK7nF-?dCSv;l$VFd_M;gah>JYbW1+s$KEKRuMfr%75eT4k1$UN3k36gR)U*@ z-a@)gc4f^-GhKE@E?+ztNN{=LN@A&bn2t6C%7i z)=B%ria|5IS@wqkdY6`(U*z$64~bHRQCU(DPo|>oZPh;tR0mj#w+9?uWh|C23l!tb zmH_Zk+x(G({qp>~W-G<}P%%ACEY_1xPr29Cs#z}6i1X!^Rf=FM@}HkkC^7Mq#qMtX zIPXpys3^0EL1(8x)3n?@|9eIqV#E8&(l&F{LSw0>4o@~&0=w9u)@&|Z{gW9={@;c2 z9M9XU3PR(x#qyd-OhDG(R>#KPp>*jCM3>t-rvyP(^0bv=%WSEKuD`m!ae6G{btSBR z2~Mt@76;dC9u1>P#TebZox0=g7;Xuj=>I`9>NWT7{->s|RD_>RqA7iKt8MrQ+fIux zYd(Sz{Dojt(d^V@yy9*@{i6LVjWMMvrJv~rs6dbcdq0S=fy;V@LJ|;K-Q#nlv($S! 
z6@zjN8nnSwnS(JXkr!KaLwAE>%MnnWA2tx?fRx+RNK6N9^Jw@$_Ja!Xv-2DaYTqIh z2Z9RpNr^n6L8mKrgM{j-+lasqpe(%heK&)X#o7auZKP-ZuAm6=XpEt zj_u?0(Ug*CT6~SwWvh)5M<=ZE--srTv}0lC=yUs{!3%@Vyq&TMQp+^Ys&kGJkM!&} zgb1o;kgtu}pZs*%)=3wBPDRyAs(qQCOO#bbb<$p_Ayk2(13M-$5qv6Q?R;T>ZH(m3 z-Ih|5Y%J3&Thy(69bM~7YC}FUCGNqBv&kqil7N#LPI4ZY87sfe zb$Rb49}|$NNAUKoRJ%AgKP6$cbWSV_>XG4OBA`O(knsq6kVa2~_}xsv7^i7f zU7eJMS1*Zo|F|mLrSQ_uZmJr9#w$Ffmr?09@=m=hLycBG$}JpJ!sD#Ueo~xre2Z@k zV_B@#P&b*o!B?MyexUR_Hk4i?Rxc}%iDB?xXfuC<3+B=Yd=SN`^|XO=M3TW2vcK7x zub~#>=W2uRug+G27YTIH*jp`~k5A#guxE-=z7Q65k|7&Pz|K)jLs5=e+wGS zN}5uhS99;&!yGyDxNpRxpl0IYLB_-a(FlbAWp?6JBus~k<3d)Z*3+drv#yrntcf8= z?2_0VxG*cf2Q-Y_CVA`qWzsYO|Fw+--Hz7^O?I!opX1T^EV__g<>U)cZXQOtEQAUW z`35MdyOPmIKiSMoub`VT4Kl6*fn2AjuGRdw84OTF_i^4%6z4867a zIuy+(Y$N+ty z$^?6EB=50WI=oQJOQWz9S!qlsDc!>)95DM$)VrE;<{ z`(oX8&F~s+6m6VN_H+PUDL{xp#zjI0M(?-gEUf%iGSEmHLPo1u~y=lvFxqV7GsBD6qXYVZg zf3`XY=c^&D4YtyY@97?;f?GV!D6|@(2mwRu^}OELIr=Kgu(P&K6Uu(cq50>1$544K z{@M)ZNL#&wtUrn(Mv@}plDLlR!&&IFik{Y>Up_YffLkwP0VvhiLi$RbT=DARzf9?A ze83xPD67p+tz8PYc-WhiMW7)wMEV$y-vAOc*dXrVC1ThkdA~Oq-V0%G8&rL$u*NEd zkF8TCT#J7~J%7m|qPC?y25eEpme%FK_Olvh!~?LQ_Z`6t;2(7NaXY(UhAv<~v#~S( z&&@Fu>uMNM8Gcf-V_PHfp|*8FrUO+So*R>&1%Ti$Jo?Wy1DCyzaB#G(~Qbb@y5O&hAX;{!DK3Yv2ngvr{xxQ$m`O*_7o- zDm&-n6VrAmo%_cgLDxm*A~pK@xb9}z6i^(Tw`lw~p9HTHQjV734^)^$bP z+*p;<(U_kR(lKQg1@BlbUj|wyV?RPDG1{8llCq!7%WhRP5F=FALW?^(fjSd~-yZ>p zn5DxpGoSwmIh`PJGd5m)Cj~t}%tbOO-_K5eP5L)BoUL-+)OUY?U^&!bD+njPuoyM- z0zp73KX1MEV-`P+NB577v$`oFHidIVV)EPkEzmu7Lxm5{gs4z=LP-o|zbQS;Ulqv+ zoH@IC(n3H6y$+%|HPt5-r#k_7=nN; zUF+6s4|5jhrX0n8VK;F^E|DD~AD|Gwkej6uz48vVAT-AbNVzGOhsn|ZfW3#W^;J+d zB&wNz0%!k+-beiMT*c-8i}mLX_F$RG9gt)?BK?1p1vVtyY5xQ7|M&X;eAT%LXxUTo z=kX6YfM(2z2_Kid`h~RY%l5~B3}^jzS1|N@wN>O5ab(QX+NTH0^3=U@R@~D+=v*J3 zPwQPj+nN{zd}zEeFfd+gi5W}qNN;{JoUI?ax*FF*k2F=z;_=tx<*ttz<1?#y11mLs zP+BV#CXCUu7vP4)XyK0W#qe;*IRXRrZNOus13yW3`1pC&tDnX`QwD-V6e9H@hMzFj z|EyhA0mJj`a8Iw4xW?Pzt4ys9#6X=;51!D@Chw zy+u!*pN?Yk_gF=yex|D7)F!>8DlIMZ&H)2hs~!y4ofQI3R1G%yG#->{TQ~UhYK%Vz z%zxi&VI}K~sEMY*owhKrXsCyZB*)3ykEjRB|^&21AuJ$J?{iKu1qWY%gS{hNY3*6RU;#O+^R>Q|D zWqE-H0U@qw1W)!1f9&*9w!ZPYnzYwHRhJ(DKLx2J6|G}8T^oP(zkCXJ%vAXuu%WA2r z!g?dN{(OOZ+nL7x<0pfzeMk8tyXSOzC7$PTAC9vd(=7#;!qO*zt3hB>c={FYn%(lW z{R3(Dn}e@PW9>1~wbctw+do4JpQH~$=BPC-b4V>Wv_3uksfaaZ7>lyR>nm&RhStKT z&q*j}kHY$uXszXEoicDznKzutbB&FIMx8HBo~m!9LtQ~QUb z1*UX#C4wc((wge;(s6xbZ9}k`$mb6u8#YJ33aRloZkgYVcSL)(h;TnJ?Fzb(v_oXCdA%K*s|s1_&}53G!SOP+#F%u)0^L}`y5 z6T9V??vcSuYPfDn!TZkUG;}Q8gCb)#0|yTVACf(z&lqLmWZy)xlnQ5C>t8sd!{Bq| zq%E}luQ>{5Kh%`XVdg1V8w6Q^NDLXOCf`q*D{u6p#8};YR_s_?;lM4i_@TE->QJH0 zrBLxZxaQ_l(`J#L^l_=ki zV}OIpM2dl(fGgLSCQB_x%oh@536dk`a+u0EmjgPkQgdR}A99N8fmhtdA17>*9<5R% z`c0F1@FO~E1bJ@9W4QH5ibrN+ujtA7N0;lBJxr%f5gOZ<|uc(>;@W8z+wbS)ccGu}J(xlbfE1 zdT&tKWoUxeXK*yLzEtoYNpQ|7ZL=+X33(0*iHqG^t zW^+?BlZfcf_sb@K8*?lN#|drU{-(qGJcP4JB)$+*F=8>q`QjGk0PWz%GaCn~jeTSg zmwUqYdo7m^@>Dfd{Bq?zV`OhGskkzanOQtdAg%mSEn%)yT?&RR{=Vsr5FO5&gOLX# zNxDNx^LQo9^~-|KKAgP1Qw|~>XQ~=Qv=KX6KaVCo?~Ef%P8jZey&yIL@2b<29i- zPs90ho5|%3tluvBOPehc&MdFF8x2=3@l=uNT8z~dzSIn$4$<2dkTm-J^_>VG<7%RC zBkw`Qkbj)~McQwXxY8FqozLQ;m{&sA}@?-l#3 z%!EY>nyC7vyhxlsEYx`pNwQM2-jBO6oT#6L7^I5D2ydRxv&iO84utzKe5Wm1LUGxL z0K#(jFuWaq1OYXOF%sdw*fWILP1%tQYIG3KKvX&11(%IT`F_YSZXnP*2QB>REaVe#jW zt*WVwBQ(vt_-x>X%YEoyeZOtwKY2*6?}!`;UdLI4k~Yrwng)?w3Zh$r2l~6e)dm81d zEEwMQDFFInIFp*=D-PHu*t=Q9`?Bv-}>PV|GIYhl+yX^OS-8jQw8S3B^Vv%5T9Jdb|t(S&q@zIl5qD5^&l z)1QmHexU6T8Iq_EJU;6Hxe8~T5?fa3OYEbL;(r{YkydSdI{C|=Zu$6%`iwJi%yG?p z?dSa<{S;jAf2GowLs-oRRX3x&fcFjH*8u?7}+7qj6F@*82vySjePOs9iP_Ok98{W9L zxilVkv(b;$2h!b)gBDTh&Nkc;{4`AdF!PC8{)3MxYbpZ!ECeE!!z~~m*H(|S(M|GL 
zopH9`7&-Yv?*uTM>(5z^fuWSYXnx)5eSqX_{W0A949RZ)On%Ss4c8f!3;VNh^Eeeh zt`%Cqt~x+XgZle4mKAzP1hwY2#I`IQ+4WF9$gRO>{426T_z;oM$N4cGnxQ%mY) z!KL+;&2&K|U+-Xb^B+|0lH)g1#bpVucaoeiF$Dt@-gXshGH0NSGnprZps1h@pK&ct0k%kP`dLcu$eP>)zh0i zZoc*XnTL{$X3aLko~CKp`E z6a<@2r9+`Awl8pxN~b(k$`HnKCsZF6^K^r77$yN6s`n-9?CxUj5Ztp+Wcom2jXg1V zuQYWNDMx`(;$5>SmK6&|$v8eX{Pw#$OH!}4j5Fj9lDL90vR%vjN4{cL^`Jcq0w|sN z&c*}D+izVu6r-^(2mEeQ#V$BS=x!O;r4Y4y(V)V1mPQUBBB(4{S}@_=a?cZKvwy8c z1wKvxBksW2I&ULK%Afw1#3-3FpRA|(uoi>Z#0KTwcpsntQPm4kY*eq+Q{6c$XvKj;? zDGi7lE0Hf=>xq=lFI}uf>ALaj>$5xCn*qWj z=tIn*+r{x>Ge#|6*M|Gsa+-PVr+8VZ0&%VQYvO-0THCMIu)K=s*h6oe?I)ZbDF1-; zv6zzTZ&wRZZ}uMcP8NH{{3FM$k7CMnx$Al~`KT zs0v|SON>W`(IB-=iv{Qjbh~Z6|NZgf07-72Un)HCQI^NFM|E<(T$^xN{bX)PIIQE) zbQxSkN`#kVJs!2N-+%Fe+}rP!sMlF?k^iI14a|Va|yW8*Fs9sKTg}tvu_Z~x=#i&YV_q30J9cffD14Gg&zFF-< z+0q@MKi3z{3$w8b>1f{PfjuN>IiQ-}^o*!7u>qabOJ6XJZ3Ava`uoBd7fA20Kh`j~ zl;`9Q1O)WA5t9;7uA*nj!&y>KlMJ9G;s!ZYCm1Ms_)JeSz!P{)Bm;FpW^Hu|#)Mh_ z1rN~VRZ~Y_a@qy3-0yi?pnmaJ|Of&!cbb{f9+S{zJ6q_)3&zQ;yOO!{E9ps)S3|s9 zG%O?{ye%@DK_R^2vR?dAUTM?U5zLX#bpbk2@SfKMK;2HHjs#D&XR>}KHsW*cyh4C@ z@++dw*P(0p(CCgzA+~S_e0(_){cX}0lzl9XH<1++^wMhRS(eEG__Kg=Ja@dGUOlRu z8dP@09Nh51+$+y9BN;rk4XWD(ayscMa8vPgfB&TvAW*!pm|%99$dTviipwU0+Gdys z)=scfJFEA&Zw&QQW&X;KX)}yjqpKfkjLg8G0;vhwMx|#A7)BYyZ5*1qzTTZBrWo#N z22HN@dNJ1gZckD!2QY^1+afw}&CgqX8n@|A+IG*M#(F7BJjv0%LIV}Z=BtB9z_dvY|TE4+1f1cG7x|N zsO^{hcdb2&DuKRqd+WSp_4yI&IKU8;e$@i-8TRAz{5$`8yQe?L0{uP+>Ij083rd?U zL0ev7VEHr^HR}$pW58@ewj{jfhTznA7)cYegB!;a95($m>VY>{VRHi<;SEc}E;Q_K zXlOqD2%{PYcuKh7C8?SvB9&vJj(TDqK@TEr?;@pa36pCAj$9?5hvmvPZ>!|F`-=+8 zZvJ>k-oLCjU&B<_P$~Id^8cx14Tv|kh*}k#6ND&XJ|00Yw#rrhJ(Vw<)Rf>NP}1ylM{z+i$sdaf`sQTA=FDsRWA}yP)LAsrdwSBJ>eDv?KgG z7)zCKA;L8X9d(KT{o|mQS6)5eD*E!}X|lM*Hxo!pWjO4QfsgmWh3(yTvGC?5LD?&0 zZL7j~-TZ--byL5zwYIu)s&ldV^kFs{=e~1dY&VPn{#AqUUBq;8>A*ZPxlN7GKEG_Z z&8b>-*?HIZin~d%qp^o;TSX2bZ=FVDHW;3UMN!ci>C?qZw$k0Fri%OY%aYvVMH2{v&r9#CY=-8_^ZXY z4BL??>>yM$Yvr60dEErjvN5B0e*2=ESS{FJI=SKRav#jm)=8|^CEGHy4yH@V8d|Zh zRk2D>g-mq+l@H5D&L#Y#)zohjxbu_y6vl!9P@<=~5UvdefZPQ1e9WSPldsCI>iuma zx!O08S_Bw-2sOS2A1!t@`}L&i7Iuih4UFaTuc(*jD>f^d+W_^HYM$y{NfGh=YKA(( zTpGW;0Neiuf!!5uP@2kLXKjS~Yg$4wYgvrf>#nfRU27URR_SDQIZ3T`;OrC^6GZJ8qrJ@F| zIlM%8MT~>{^xf)Wk&*Twf)?$LHJw3hbIA=-cSLrO)6~CUfIL#6A;Q(d3@H&F@I?Dl z;Hqza$O+Tr)R^Wlh@(d(0PJUpXyva*KYzHMYl0-{hXPJQTIv#FY%yB;78ZxkSHM6N ze%BI`P$h~Ef}@UxYrr`Y2}sEpA~M!c9jAbD428J>mnq%;A}u>BX#~E^(JFca z0(5kL^c=h(V3#jpn~WQsEtpr6wk4jhG=Qg{O?mua!F?FW@qJifGTPu*{{AJ}{om8% zZ{MXH*I*0ImIDJ_m{Tl?3|yb@zQx6|h1+MEPM=<3Xf4o&F~Cd>Xy<$u|4a`y;Jb-c zm0=kSNsC;4^`16aAF*v1xXi_pc~CHGsfIXb4#Y#} z$9bI)-!sQPH13#Dgn@j`X23-)>?VkeE*yc@27%ZK?$6QNHz+O$Yial9<+RG&O|M=` zgAQw9$VjRy9Hb5I&k52RXPkl8()^fF*U9$j5L>H#``5JnU%$h3h*Csww@&P;IWi-y zmi9rih^Nt?aA`VT+AxNruoCAZmPeZ^9dpbxm8VeDxL+JK!_6!oUBJ4FgRXfOM>64} z0tnMK$O}{gMs${Odu_R1A_t$_SiEDcSKz;JuFjrL#sBh8DSh@Cee7VR^}uD?LLfAO z=NQYV5dO@gLOzr?;GyBHLNFy}$Bpz4#nYIg@TLc*3`@xc?-|cxf?y0-AP5wrISeqy z=toz79mnC^pV(nAOCUv_FvtrijZcQzrU?j$2yme#@teNbAtu!Xe_oCEHB2@yHb6B6 zup1Cq7vSbFo7Bf}1&C+RW?CRyAI(kEwgY=yC0J(g>okn4jiBgZ?l;W{Km!U*`CNK^&Q9#VBQT(OKFJhD>jrATh7Jrqn<}ljgkE2%}9IlWN>U4X_19 zI8M{&=8VNuRPYD{02%1F{aD@EO*Q=W?J)?1no}6a+S*My8dVRC(?I(agbz0jgt36A zYg}i$qVx>m2-=co*CSqWk@vOKrypge7!UI|j!H08@6|Z{?dWH}dw|z59$I4L{G+KB zh^?qc#72B*1tQQUtT8CZ$fOWAK=2||H~BmolfD;(2-?%&3^hr@i3N*fZAjo#31Fq0 z60w%S3VoN*U?Qf%W@uz1G*8q|`fo1~oDj3Ua9n0{NXs&{(9-?FT)5uiB#s(*RxpoE zwEEeb*C;sKWU7oYfO={3EP+;Fxbx->g5dz`XD`*>6U-!wrH;m*tu)anVFU_uG7EupNjB4=S!%0y{AJ&Ug?6p!wZKIMmT* zRwX!@5DjWI6qvs0J?A;sXrOxf`-3NG{p}VEm`MWR;jCC2EGokP+q`l)4b}OtB?SnmHBoO>W>~pmMQ_nLe 
zY6rY$X6V2Ggh{PWPXDtPXv#bfXM24Q5B0q#DGkkk7o};N>$D;wl`sSZ!hv^KGS_e& zaGQgy#qa+iwQk%(lj+22*T6#H7f40lqyHb_({Bm}uBFM)NKt)w3eV#C4YFmL2(_5%WC&7ntYF;1csHb83L|YT9xE{oHH% zoly=3KR-&@HnAvhvvu)Y%76EHn%}&Y#$7Bd=Lk(Yw0~elOVOTeMx2o;21^UrXmN2) zm0KKSoBQ*MXNJYBkV6Txsfy+v^C z7sT`kTx$U>G3TNmT&KTa3IkLzGSOfJZFlk4_s(yo@fvBY@3f*(1bG>g(4tusxT2W% zr2q!MjoqmTWmr>P@UVG+vDX?Z|)!X1Zn!A>mEAq8k#Q_CPR2HBg9)TmAUt&=J z9@^)x+?QWuzziL@N%nWSf&c(O07*naRNqV;&kN0;5h!HG^utuzHn=xxv=XzK_Tgh= za8|2EZ-iND-cv7ll0a1u(7y4*h=maoUDgfIYrk+U*PE(rUxkkuSyNtIMNhbuCRj`# zbLUthCQ6$sL&tHPsBmBw0tcy5LCW1IwQ&jMn2>5SB2NpTt;no*hmq2i!zfz-*}VTm zKp7~bg_lxzF+ngqeFCwdd0{bhO_zK0C{0W$BM5|{8w+mcXk5KlZ_^qGKEb3PV49Ej z`sCe%NSL3xT9~f1i}pU)*hpv5c$({Ll*uSUIGO*=TnEz;u}$49Q{l?FHt<{hlX;6^ zpc@tg1a0=n{kv(&>PV4S;&RP~E)}%`&0iT<4GYHDg89z}5M65vrmMyY;m-P_wEO5; z8nAxAAf6&1P?`HfPv?2ssD{y8gr=~n;rmtls$aO0GEzOw84paCF@Sj0t3s&X5(w3~ zqXJ|Kj53K$t?mG8W`|4-p3`O=Il{YvM%z&HBfxB_fErm#>BWnbzWWvr5Z2g`xuYXy zUDffC7q?)V5WQx5Xta)vuA14Tdv5|f^xcZ25{fR#8CrE6JeeUNBrwf$ZUZLe962V5 zNY|HEjT*1Q>?95~TIqib4h~4`)FP~HAOJ64O^Ga=L~yuZTqZ|oEy(9G7-$1UNcWe# z+HuReO_0cCe9UQkpSao2_XyVRqBR4J4bph8UQW&1pQU}Qhdwu=%7h}NZ&rMjf!26H zxo!lP4)acTlWta=Ji06@1Pjh3DK!|YdZ$dH(1AV`BGX8 zhzpQWcd)*mcCqlVebWv?gD$XAm~nx(T7^zk^ex(h_87Cl*r96za6_eL8|%f%RKo_# zd8zelLCwR}=9myTw6D-?sZedbOOv1PqD2hTj2Kn(^0mn9OQe!ceJ<@g44@kKyw_Wmx!NW<<=Ee^eLD)LcxWEW1cZaGi!p+5nyL7ZBuv( zZ}B-6YBCAsni3)EJiz1V3e5!L_nErmgrCB}^m|?M&V_vwx5YCBPh?v8Lklr3g-d-1kwE}(^-K^p(~F!f0s=rfiHLVO4e_R%a?zWyq; zSSi}rScf7=#cVWXM=%8XNn5UQ$Qc581Hr(IDcJX!6hw5;H4l2t=L(3Jhmr{Gx57aM zuqHwAJO=*paE|4>ix$V=Lf13UNL3aI++YVbs5Zi7s2XF6oo_mxJ%66$bNS zdSt%34r4t-WUk#had&`m=)Xj#grmf%={pf(;RX9Dyw`vBA&*N*a z{Vo;f*3&*K;s?;sHZIv=2mxar_JL=j#MPU8g2Rt(k2dl!ZHYbcn3n7?9!CJZre8qU zv#JgguQJ0t7Q8za-8rra(Usg|ysO~T6x`bT%lBzc9IC!?8aES+8zpZ}cHjRBW3zEI+XLl1#OClopgY0KOMUCNy~9EUC;k8n}Zqzruu51P9m5h1Jv+eOGt zAz%f7?mWE<;&Ld({3f4pv8L|7CWYZLVyiib%BeEuNoRltJb>%C&s1A^M&*Lm6{{-i zI9%((2YSRia5;|SM1?~{=ow5d(}i)qBVf0G_h+;yBqsw^8z|I5A&M0y^9+Qq#xueP zzkM)DJ4~PiN8Bc|LiF3M;Ajj+3Up?gWtLaqni`sof!GEJ5X!6ciXm3c-vX!$N6y3tIffE#%qk$O?2xKzbqenaKccoiGf9{fguHL$uPG7s0 zMwrYKH402w6)YW_W`yKeIo32vfBj3^hiQ*oVGV(~0$lfweokEux-7Vf*RPLCSVUZf zpA;0HtB&gXjPa?()UXzI@i$K&JWloJ#6s`^b%9I=LFK~5fM#=T1-C@Dzs4*c)gd zU?Y|w3=|7w^z?b!$E~yqo-DCMnxEZD*^L|NHSU%^P1Osf3UM?UA%aEl2@mH`A?fPn z768n+CQ?aRsoUl{7gSm}QOh|5s9cH;h|krbs4+-udr{BHHW7HmCJWY1ucz+K8)O9H zHbsdWFjg%+lMdFnu0?-_T9n4x_|8m)g=(54K2seY>3oylULj?UQN>KA4px6E#y!LCL`AgYP9ZSK|w% zh-=Gtot}c?5C`eAN5C^?$?vpBU!-*d+48aud__3xCXVThQk2q(Ind4PO6)uy@BRSpi!yZ=6nleZac~E-lP#iA%h?`FP%^8y!O1GjE4@u zYr{O#^H*v1atDoNnl`@qEKSg8Mg(CewAUgVsf%UNz$AZv|6yb+tp4Gv)c)O9X|{PP zDkQ4&%X~7JRlaPd@hNjaFMlG=PUg#c zFRhcRnEvzso;n0JWdLdv9wNT3s+t-g8vEk?o5&!U!i+_B&0GaoWNGN%1W5z@+#AFY zt^D?%qrX>zBHSjp_G%sc=ohb~)|Y)W$lcVX$k;lJ#@zn#!$(+y#CZ_#gDyd2+$o!1 zew7L|xiO}=B_i5f+dPLhy}?2@JET>>p;oEj2wX>yge!`3o+4yKwpoFOMXUz`a|N|= zDeK}G$ak(vVO8k#r4yy#!o@O9fU70P znsIrQ!C1BA5qO&;ywyIf`dNrWz%DXl5W-6=v%RO!aMNLnbkVb~oJ-5A7t=m*F+#y8 z8Q_dsEb}W4sW51jxE{#buRunv3CkQI{q%7f?QW5yu#(m=v3hjHzrnYY33dSf&2X<;7-RySA+kf9WG!OxE0nZO8+ zGYvK7OXW$oN+E*`A%J4Dy_?pEA*f84#$A8+HZ`6+NV9`$k^{u}=yOG% zocg54i3*1X;#GgL+G894=H8%AP8UXz6WClndx~ddnEJS5R>+%f z{r>kUef>o+4vb|q31;o_({wP#ns{@Nc5zj$T)UB)w^%rH>NN6$>BCjg*xO;nXg_T~ zdIYdjIz?NJo7Zq5olW~#7k$vhEDb4Zfreuttce5$as?`ZIAA`LDWa83*|iS-)88oS z@pBw%j!^=EHbQvNPNXL?<{?BdM|*FQT{1>EyhCtUT9~Ay(EN;jsQ8^D7LvDHtl}=U zgDHeH1%Y}9uUEY9il{tLq}BXw@VyD*T?UcoAY7ko8a!7ftGf*T5DY_&v2NZu$5FghNjfeE*h(iCby5wMBdfYYh#v#aQd4`ZcMu zBqPfST&mkDTT!T;OF4|xG1#7z%f=*ZN7#e{yg6}*J;n1QL95b%^aFS;4wo`WR0a=w~ zr~$wM^EP2}bDxyoX;;@Jygo>&!9f-^dsW 
z%9W|Gj-?H+Y5}u3W=E{*hF(n1Rk3pI({UUpDjW;|%g~4xq~+@A)cKNvav)rotsv;a zYLQ^l3M-z+q_|C(uGSYUAvifnJm`;SVz8tRxv`G8j~--i{u@^ROb}E zdcTJC03#lP$ZIq52s)T=6ku`xhqq{K(%%7pb~JktN`XuuRS#<;`p7fP7742Wf+M#T zTmhN69yH-Q=*@XeNoJ>=nc?`KjPO5?rGXx?Lg*1cV}MzH}L&k$PlU zG9r0(C;+Ng5I+bO%;Yx^tr8YEgvx*^!k~6~GcC?^i6e{*0@PMKI?^Jhy@Q7(GV4bxz=#AqYA!b36_uqhwrkC;8VK@L_ zHihtO2-Atolxsvg-9R%m{fHycPS$Iu4$4%YY@8`pu~z34yjSF_%|_}dpW5K5>Dvm> zCU9(yVK^@yr`FFurgc(mYrE?N$~V#sS}CxyhX|Pw*P1ASG@%7s6Q+C>AD{=1OfFBK zm$-6e@PgX1-D=;uJ@g8+(c%XWeonKyKcp6dtwsMZog?UO=^N)G7&kD1gs~tL=(LX# z*xtti;M@#BGGkAQ#o}F9_j3fGFjVW;>S*9PHddo6t;P7J3Lg5Vt3?46wTI$apbveF z%rEY7EXD?6(I59QlK5Bl$8p1d9G5wD3S(dqxDQgwGqid{Nwcly@T4?9Al9*+w!ZyK z8e0Goy0xk@gdIxFgnk)Us%`hvI{k2==KP(CQ$LMyFCs=6kpi9bz@bl{T+`ID3#9kX zasL(Fbz-PsI&kdL2QULg69EC=nCQl(pU&NJ949IqlyYS*GLwz}ZAM^Dsb^}VQ;(m3 z&kUT2pS8(cz+^o@lOCf*uW^2f*0~L%o^X8wQ+fsi-hr`Nnn}q5a0h^Rp4Ox_Frqc2R^~O9U3L;3E3zY$Lpgv*AwyKFv{bJ?M=ayGK2qWY z3k2ChQ(9?((0o>_5w(S-!MYU~N*6*I;nrbW78tHem1s}Y=l9qM3G7xQGXMJnvj+wq zey`5?Js85_`48WDt|ck--(Ubo(0_(7EtsDvag`MqK#R<=1zPG-p9Y=F89$Fp1FEhL zDO)Yf$uZ+M`UXs_HxmbAKZ@R-MTUMj>|10X(Uvr?hV0>dXhCt7KFiExTHXkBJddj+ zj>B=5UykpuI;Ib{2MTvE=0jh_bN%j$#n>o`YXa^v59gF4EGOxJ|Hf|WxEL2?HJRz= zQClNT(@Z)T!vKp->sGwQ$p+KXeFMJn44G2fN&z-RIyD4^E8}^Zv#7L(kg9dbc7oZc zxJ1n&I_%=Az+2%)cSQ4la@lnlr@h$VZrMS38IKmeA+)JDjdcjW8~<| zJei0Y!1DmD0=+DmtIw~1!7tE^Cge&_S%zQhk^fAtmlJJjmrTJxt3l4$CKU^>U%=Fa zm`JU|aUaaw`ZNZo-BTc)y^v0Q`3D#pZ0+<}gaeF>*)mVqV4c}KKHI9OqRW7nX0zC< zvIIAa&!vXy_g{{f8z#2&|0$d#f8H6ZzF$6+%od4|QHNk}DuR6^mG(UqUP#`{W`9P%!{Pia$u7-@zufrFQ?$TXue zC&V=&xHd?4uBBfKH+20w2dKbw#=> zeXjb=4|IkZ3z!@dJh12L|4$D4A*&2cm0ked=uHLl=qC!2%8AyZoXkRkx*{D@C`kMj ze`3QV7~hg)2CdoH9gswdS+7gpKa2#|XMiQUM+85z0)!Ud!)RA$Sv;M-uq2o~M_ z$D}u7QNolO=gy>p!i7`H$wbV{nE=+oJbCv2=%`@*@ij~zE;D@X&J*~ x|$%Q^F1 zzrdOd7m1NsHH9&T%NE5>fvzQBP}ddk%exQKfEc2CQMAr%q%LbivWu6}KG|%#D1Ekc zW!99i&B+W)&sjKkFib-h9LCv0T!?-mctep84xU#~#PGNhc+<$QjyOmERSKCJaP zr8Qh+Ipb=QV3H_(Qfsp^9v2-Fo_+!e7=O7?ICQI&!}~JgmBugke}0hmA3ls^0ILx< zTeN}jpadiRRCIm%<3xpnh`_9b=Ah>91c2kA`8YJQ4O}&?Mp+~H+Bts#V1T%%(U@2{ z+dz9>mzlu0SD2I@rq`L#z4C>Vhv^t6l5{1B8{~vTQKE~(`NuAt8Ud+IB#wJ z2_~;*ZfPADB1GfzJJ!tPu)v=_Yqc2#naEZlQy`K3<(a-ByCFW8=M*}gmkEf--WS>j z3^RemJv5>kX62-mJP;t$$j~Eu8=}>tdv=#nG zUI9z{&tY~|2tn=)(J5SD>@)BHf&q6?XsFvWV*G9eYUYwcIYx-xr&qRenPXbv|T1C^&k7-V2fpQ4I9@zQvdj7||cwhC^etI|; zhadP2xPnivRgx@otd$=0x+G0^`toTSu;wZ|cPSMcn^cVD{meQ_9&1qG58~&H_ldj$ zp^yV-SUv%F=8i?M>Bk?^&i6k69AcipzqoNd6~tScYa3~R@Px?(Z_)xW7U!A>7q!+9 z3_0go%;DhKv$Xr|_o=`=yUG@$O=>zVD6Y6ipE2f-3bQDuyoXI}7I=A$jr z;NJ7KWS|QP1i!c%rdn3y{~O2xsIK4@Aq;+6GI&hMiTtzAQuq3`Qb_0{hKZ@kf+iu< zoP`Rhej%gsT!m|1Q~l<(GSvcYs{Zy>O8@m=()iBzFiPSb_-RGPNK->Y(X@6kUuTyL zEmqlf&^(rCMLYlf|4yU3-^ZQ>`YH8YYbv|1wyaXkYnKd{bu~7$Lj&3(oC|XdO6!j` za4F+d7*|JFH$GoRba)(PUPnL6vda2QzzxE*9an3ry_&W-c=ZB-3+$QKphz;NtSMAu zQD0)M!?T}$OvR;3Y2^>!q~a3d|H(0|uym<}8L zf`FoYw}iy~9-ik$x=>gH zR~k_?@ZINtDeMHu_z)PrOmqnM48O(VGRMS=i%VQ7Cm^g2`zuYy7&Lb{xT^dPrRzoD z=|NK&>s5B||CHYU<6g<48dIPEVU#p%KKTjbR(WHk5vkpj zGlzGzq&r+=`+79+bhH>xhitzE3gRva9eeaWTuH)&MPO6hgAwpYXc&)MV_pg-r;P>T zz8TiqS_LoUgQ_1UDrlt~$8n;wh`NnWx`}JV z`t>e&+o8JZSSD^g4cbuCp2Og+V(2Q;%-|anq?=0ZOF7_V$&&4>VylAx&&tTf=)#u!e9Myn0C%6SamQmL>#UV^P=~W}p^nz|;AocZ^t# z56zF^DLCw}0xh;^FU|Ibq-a(0jB3<1aZJv_$kJBc;>j@N*KzJ$|wC28#!dO zoQ{kl8HVHjpxp{fVnZ-3=SiZB`9cYrlbE4(tH$hFm0)^k$vaP9q-_>g3cJnQw^Q-e zm(eB;*=Vax+xp`ZOl|)BC5#Jru(G=M`K>f0jd%yGbYu(wo}|Ste9MmiV(B=F3NNbn zU*LNb*5zl}s+zA0NVU>+x43A|fU@gzwwb?yI4o^IgHoUCn{=t@k{WZ(7}`)uSrR8` z**3f%c!_($!rxt=5%Ch?7)NC(RQTXYa0;O#UU_#lF^<6(q#efx90%Esp6f6l7C{`L zpp7Zr(*>Sw_z0_yNQ@!Iz&N5zJYzOSvHI3i%Z&u1-)NbAK* 
z5N=w=rb3`lHzB14{hE9tj?42_9XHQfr>~~4>ne1St%{ifJuE)@bR5Tt3I{b$foGYF zCM7HuKam3V_n%T7ttJCY7chIZf|)6$NPX89pgxrhLZlFpnQYBwHzEn&CFkn|Kve>w z{t5^b6)m|p(k`*NtewIApi zUPYc4h!$E0jU8fPko6ow&nq=B`PMQh-^R zY#P0V(+XTaZPr1Ub~r;bDFx;)s1!s(e}C(!h{$*0>-r5RAxMpLQE-oM1*Z4x+4Ix^ zeoeVLQx%erKDODg1&NzB-=n=!E~WAFPplBe_X<3V3m5RGay>9taZ->mq<~cf3!QU_ zhCk(8%LoFN%%w19o`iES%)~F?U8DH zv53;eo))l7&=||sV~fg~h5gR5w&wyx@o*>YBZ6FoYaJ&Kg-2;Y%9Ih&z_k(x)q7mX zpBUrem+G@D*g8h;yKVs1V7xfbAoxYXWvtAltC2xVK4T&HT>BwxfQcJ=h3gaC&8)*k z|K_gYL10+*z(sYDu)+O~kCYZPXP70=iAuoIp^xNcf9zYl{LHgB9p8Qb_=xL3n+#>l z%&~I(=s0BJt?Jeyrn8PR)|L+gA8mF)XIphsY#Ezgj||19MW3BJt)d13IN|j;bL(b0 zMS@M26#ASb4pOX0FJ`eSdX{j}J?6^sr4|ucQwjnz^t!x~Wrk3&>R96St`Oq|FDuw( z&hjMm_T%AvlJ6%f9PDv08Gz_6S=QjGk=oU?W(>3EA|KN;&1z}@WGJ66FmRZRhfDBd zG9WBBOIPc3i$FGAVp5dMuKENm(2AmNJXiv6)C&Xh(H_85a}xpF``LXPa(w(tHkvtj z@-l+dmhsRJTOFEwB6FCcLEVSHaGs9FmtaUTAzmKUdk~JyLfw~3>x{4a`Ay0xTqc!` zEDZWm17XyV4Ms_zZnc@%Ce+}0pFD9(IJ@Ai{g3~FeJUsm07hC4I5yCc@O-H`P=rwA znZIG3!5%4rWk>ick*j5u7S*9l`_M-(tBzmwo0AoBRljo+&7m#v7J+w>pB=E4Xr~z| z$2Ds!F{XrH6tjWsB;pY))=F0{r*+~H6KV>LDPcYD!PHjSQ-gf{oS<51g#cuZfaT%4 z!S`?t#H>3KMUyZGZn_NORWcomm&IIhOC=1dtdsIiZae0qgclt+!jycZ=TQyHQ8?$~ zuiq5}GGRs)E+rVIr9Wamfvxx-tNvjSu3@p#ZNzARON)D5<-8=8vv~99Hg;{on5C%=rQCPcJuZ=aoLSjf_D*@vXO?Zq#9ytJdVfhJf`sP>N*Uqr}d-TaT%ZYDZ zApyWD26cqO1WRs=+js!1tN~}0Rwf!ZJ5~mBC!$;KW~^dBn@f^8Ce|!dnE(!^z-xVJ zMVa)%H=p{+j}sLRv4~89MtjJ?j!q!FmI8k3EBHdMU1eu1m<3W%^Hf2iOULE7tApn>p?vecJOy=xRI9OIfjVx;0z}Q6U z;geUaz-3Z+rl2m7h&$~AlVKJ?iBMn|Crc*AkB3J^RK7EX@$$K9x@M+KeJ2I2!|qn; zCZ;{{4fD$lw3%Tw!-bH6n68j^DgF2^=)zeFIo!e(Frt#_1nC1Fdc_{6PfNm|= zz#W&vq^I47EHFgFS0KtzFdr5yn(0&daZJXCK|=r-J&yiY<2Zc(t+ABaWciEpC*iA77Tq_)R{jfhS8+ zz{zMxT{gm5vQPJrWQ7iHkLlm<5xgXT2}4yt)LdQ3!ZOR9m_^gT$KAPb3EE)40W9+= z&va)+g@UV+Tevx0dFv6di%$-NJ|6k(I6QaseEiJ4T9yqMT#ffwc*r>m|Fxh|_h&dm zaD7MyqV<<7q{xtCZ-_2^PFSC9Hs+G)yH|q61`{|Umal+AvOp2U)KFKDD-cwO(#sd2 z;1Oj5=eUOXj$(qqP!ECv!@iovZ9Kr^SA7rMkJF)Wsz_N*N1ThH^SfDds|cv+ljkV4 zq^6!?+g_MA(1Ul$q!B9I=QgkkLsNR)RZbpbvCkR|-|{jwE7*EHFpTlj-va(*AUYOx z0xw9)m z!#^bbQ#%K~q%HG$FggwBsS;cB{(l*6oxLci4mA zN1R)sFCPps1{m%Yo-6aWpW&NlEHcW7OYk|U)xL3mSOrY1E|?O6W$$AwjMsO-#Jh9p z+#LTAOb2|?7p`_2%1}J^keMnB7Sf%R{Om(~*{$(`ryM-1<2+@Ni39X+z#3 zvlfnKoQ06UoUk80xPpz=wte)N;q{2(ormMf*bz*E zV77#(i{J!wh9qt>U+#2ehR*^4u}^OO{O+Ulj`|}v{`eIDUc?45eKl6BlR#3L4M11x zl?bYUaa3PLGu3r7bs-&nX=B&BVo|0vL))c=Qk!FRViomyCVO~cW)r;9zJ`l6IA`5?A|as>Oz6i_*BQgGj*$M#@J-T|d_ygn zPrPI_T!f$qPcJ`4kL&=jvLnTunnYY@MObZiVe(tY(er`a-mZGb5e!+xr5|k`w+hm( zvO!ColH3ajxsFRIBcDB8x)|xY6E}llahyAOwPg%f&r|#9J((xK7Gd1FeFH(V#=;>p zF(@&BFt4#ZyvDVr|C;$CX!Np4Go=a5gJCeyPx^vH<;4ZyiPKW{l)l z%KPNM=NHI4;(oL7wwQ-4;JwcK0?V_b0z^E`Nn4bv<5Cz;RT4%KZ@K1OeF72^ePACGEX6v#j@8=7_v4WA1%FXJa#E5cJY zs}h;R(1YdDkA`$1v*&P(F_!ooUxFuu@`MtFQ<{=6vm9s#0bRNw zSdImzHx*Q-5&lD+HQJhj>oev>moiVMZ?pvu%DL9Sw$+by z2pU(^v*21Et;U?^&WKwsNhlZ*yBq;WV-vrnw3Z$CfA{x9g#(ZRLZ$X@n&Jp#JOIHg zZ@kZWd6(N9&ghjTC|ukldsfqtIawP>%~fj8nQR$e@CPQe2a{N#fzXr%7)*_+ia@8q zPZ2INpu@c~e_1%<2}I3_RTSwxX=MX61Xn6s1|Z5|V?D?mmFxycBW%HMB9HY!ToJgZt8;#iG5Enl zMj$}~|2A#-yaKJxDzI)DIl@{vd*KQdNzbz+n))&_dH%VhUSC*Y5f)#4oi5?`ZWD}e zv0!nxMSBF=CPT^);Hxs_wm}6`bNb!)0lG(^GM9{98DQ6|zS=j(h_vEtCGZXh2Qigb zm)CP8-%sYK;1~T92jZXy_7@!RkeEYpV7@-!7h?%Gfw;S9qu_As> z^2v~_KZ?{KC~O9+g}EO?L-NgXUc{MCfAF}II|ihsntkNfhhZiHygq}R$kdpKE)v%a zrH))dW-fU!2?mEebT7CNt_dS*otm4-c2d6yG#eUXmq30zLXohf<2vv?D`W9!)Kh^i@-Lvso z?FH0qGe=!DeHJS%A3jL)XV1}A2Q1K9N%`e#m=r{A*fV1ekr2a+!1)0ssQWBtu?|*! 
z^$M%j5y%kDm=tBxm52KjFSCXS@XBKqEjR&T>u()nU7C@lR;tTPvq|P^W}8}~{H5$G zS}JbdAW4ag8r(7oydA%Jp5`byd<4h9O=3E&ix<*} zd3ghKw&qx5QX%Bck=Ke=S&sw*o~NP{{W|P>_1X98P>2ZAU%4Ea3rQiY9Q()j*aIC6 zw|)Ie+PHo*?Xx<2WZVQCjxyt1XCGG4(%qaYefMKJ_#3VyiqEaHg1GZf|ByPL-%d+h zVtt0Mi#cME;9xxzka=^s6HTANQBz`PEM!zAWGsDx6xK zeH5|fcj@5HcWFR^PVd}>)V_5yEpa_tkIXu7BeJH< zS}PL=%<$5sB`!H5LaH5+^_+YEoXmj?hHQ zc=?hY#3*wM&=>9%$@QTH0|0BrEZJbgIzw}WL;Kb9)F9{EMPNo@7ih5yh-6#M6D>d& zmPNU^DTG4~|B+D&*fBx9<@MXC^#yT?%a_tBF0C1pKj1z*hzthpiV24$0et>4(F!M# z|9>Vrd^1>4`Yddy0l22+-EgiO>&XXE& zIRl<^B>V|&{OG8T?{fvON{jVLp6RSVTZKnIn2L4+l;c;eKYm2z{b=G%W*;E#J8;CnqaV6>TZ@4dtjhA z)X-S%hgLch1^np3Ax{1l!}<8!(eH5|j5*$UjxL(H?iilkx_X_;ynmzsCc3wAH|A82 zfOb4j4Hrt6pmlojj;&$u(??2u!$ezW&)}xKoVF<$VsVTy%4KBfAzZBCbRI>j$?D-f z0*FIkBHf1If-cIjggA+3c;3V3i4*qoV_Gnbf&-&EIi3$hw z9;OEE1;kvS*{7>WtqZyZ{8$BiZTK) zCaY?O7Uc(|vKc@6j%`CTH5lZP8ITc_Oo?_qRo4_^D+DmT`(v8DAWnit z5Lzbph?J&ny5e^P3>Ksy=4AF{WSm$D^5FH$v^%0wEtnZ4z|n6+b;o2zgqG6)xJ-Sq z!g59x+7T`r*HBot!!cE*8|>rJqhe-NKaUc8bzOB05x=lRmSmO8&~T=2C@nyL2h_f* zU%wRTlna83R+d!@4$YGO7bsU&{z12+UY79>76U;jJ9i@WdnlIPfFFk8l@(xCNF9sZ`9Uss*uC7jnzz~=o!B*xbHooWCw9K#c;WuFRF>X}fefK4M)>ReRHauGEaRQ#tpzRL( zT)cNHQIszQpL45{rlxN8PC+d-E0qrM?JtM*w8su>P#?IE6AR2x+Gq!wbkV4mll!@> zN;d#*-a?CJ(+5*t8Au2S*Bt2%c167cLB11;CZB>zV|>5hPT&;fP^+yxT|WRNMjN`J zO`|19fbxce3#+1l=eQ}^@a?-a`tj%BNo!coE#huNF4f94A;j2dV`n>U|L|kVsg1SL zqCOKE(pOOz>}0oq^E0E;BD*>U3pf zXebg5|H9evwSv_q^lr=gZ*JKkita4RAn;g+8VsxN`2OK%s_g# z12j)Mf=aaQg1iNs+(mskZY>#x?K!roea9q^7$sW70QWi4Ac{jN!j#Jff2Q4}ze1pL6u6i2h>+;dp$F)N1L=gmF%A71bb!#cO@)1}4FGf~A` z#`Ku;Jzh23iN*r#n@WRxp~t+si`){QytSTO$fw2Vj*eQ$DumWKQ;~}O#QAKGeV#ig zy0?LlbupRiX1Ia0S4BdpAVz`$Hw3ifPkFC%71a{>B`i_kZKjx3t8~v}pn3WM&ikwm zINt@XEA**LpC-f!vK_WO=K3D>%{tJj9u7;_xOT%jW2b296jHoLEN+v2We5UyN7M?a zV~up^R|mYc>-)F@+qj~7ls>7GWHaBVLyTvgIK!0vZOZIPVleQxv4Rx%Fl$c3CqIJM zeezcSod)&cG1DNwOxHYkG+jQ8K@Qw_Z2E#m!Ge!EOk;s?$j~$jR@a)zFW zy8whWzMz?|mEb*)R`kiLrsyYucv0e*{${!dI48q`1kg@OhENu|6J^W8{Ty885AAss zZ3AvJ=jA$?4F;wVjus!&&z#HnZJT!MY>F^NGp~URd`$}$jP-6_X9M@us2Lxd%+t>x zo*)uY6C9EuKyZM9a>rEf#55fc{E`vq4znPdoJ;La-?f3s$Y8>);M#(6(U{H)Ta#hI zd;qqiSKjHiF$U13vgG0_>n`41W4q99TL0|xG`VmE%@r1pHD^uz5au*v&s@AHj<_>mi4SDb$gUGMEtLJP_8x-_i>7q-ET7w?#G*us0?a z>A|5~z%YQhfnk*bab2b)Lw8^~41OKKFm`Rgq>rFwVsI;VfMsHru?^sTzNBYxU>NB<2Gu$&xHE>EzTNbB=1yo1TbK##Woz2eKmoBO1iqjb(2%U3ZmzAzH zRwo;u=%aPF!Gks~rB$9IoQ=)77C}5UriX?YYbEMrR>ORSq9QNAva zwgmzh1Wykhrku@ACTOerTIFG~_>dCx$D(Fnt5X%{r4uy^g#qLWdGpMe2oRF<{tl|c zq+n#c)NpMsQtH$SctC9F$A#`oZP|;p8#mI*O*U*H3uB&9j~!%`q1zv~4Rpn5dcB=S zcYaC-1fgj(waM^l{jaP6G1V71_$<#y{h2`Fvd;>)fFa$0Rj{uGLQgji?XODw#3d}3 zFl10>vBXB^+`~;VVp^0dFjn6f6W7H%gHkhtr9oH-6#T)BD_28{s*@#BTi>K41G)ET zp#|fRF9@;77z6fdUG}TVv!@VX2Y9$YM-JhPFw1KozYM&|T9+A%=%y^r3V*fTaxxgB z{qj9Skn1K|NcA4}7iQSu89A#?$F$c(a5Sj~+Q)LTh#90>UVF@cBh{I)S(aW`jdK(; zBd!Qfgs~`smcz@zi7WgjlZ)}iDbE5goNoZqo#Di}uqJ4`2TsjlFoXAR*=_AbFkVw; zbxF>*NCbHPEcWnLXt*2SnEl8bR0Q1v4+b#WeZV$fWzECw+w8p9NjU;pZww+M`dIbA z_Y&8CIrH)NUw$dCog$<{Nce+e0>@m*eTqSX7U?GA>N9U2aEtR)bYV+OuZ7LQ_{QLY z3-{E_3kzl*Y^U=RT)whzn7&NXxJk}$H5AG8>J_V(txU^!eXb7zw-f}CW#|kDKicKZ z3+S646u{CnbfG7L8Ec}rn3`&k-u*XP^drNC+Mx7{HI*V;iMiF`MUTa0~(vN(pXwOCYoYJCG27Rs9Arq9Q5oTzYc`eG5AvjkkC zJ`%_isxFgMN0Y8Mkkj~^-PLK#c8kT8YiV}r;gAD zdbm{}R+-+@N9paK{>%nlJE=zvq45`=rPgO(q%OM-p2)$NOMDQnGQkorbjA5kn~ah{ z2tInlOW|7`G91UE)%wdJ;wogh2S2ykcWySD1swnSuc?3MZ>dfFqz)RVO3no9q02SB z0XG1Xe1Q96x|`l_{gAf71ADit&z?#GYX*ETT}X>HTuxZo;=UG(hj^?Y2z=&yB^0Xn zxajb@>a*`MR23uzn^wM=hXrxA+LI?Kv0GXLRX!LF3PM?7mBbR5=-8wRQ#tS7Pc0a6 zVdBQCXKDCfVET8yO%7*p6o=F= zF$KoLRlE*M4O(_~6b{n5a$=}*1dvr;W^S6qOj%X6J=ji*hxgO&pZ;5#APA;d09rHF 
zPclH9*n^_U^E=cs(^4Cv(9Ivf^buZTaUP}*^2bW`9`IKIJa{!odpmE^HouR7m5SsR zop8ghvy9C>wa4rnw}<$Y%LOi`0pxo!dnmN_o? zJmh5&yFQb%192FL)x|N!QQN}LtyWS?UNwy-h<}G*dvPHy-)XjHvHQu?8 zuT?*N_a)IQ+=7vUa0q^`<~h8EN_7IP3-*(6OTWq4jnp~4o|eP|cF0!RBYk%SgDXHv zgLGXwFh?WmfCzicxBEwfQ!otNF~DPs=e?%?YDi{P#Pfj#AEMV1zWY~r%P{qLe*OHFRAW6ti!@%>BSy(MS7~Gg0%R4-(T@Rg+`V zMA+~Ow#W53f>sbZhp`tiW3nDhc$y|K18da?=ia6BsknSG?P2vz z5Hwm-_DkUqqCt&{Pm9Xc#XWAl*+fbpL&!#R#Wn$bf`11r%$u3eK)s>t<}H*n+dBS( zE(Nh4eUgG94&F=H@J+mS&ghm~)LL{h;!^S`=64PrXa$P@foF_dIKY9hJ`uCvxlE@4 za%8w?j8(OmzZODOH;IZ@a6VE;JHWGs8^TJT#uvKWKVdI`j16nt<0LkK0&i5>Tzy>8 zrSH}8Tj#3Zj=sBqOWHP@PIre&)DYkK{!c%l8NbBE$GReLpr$rLKV8E@kPQm~|2+a? zQ~Y2)>sZGl_9c0H_fcf$Eujxa8~EpfF}D#_xb*S)K-DmEobbT{AXpy;>;XYl-4n!ZA4SxX!O7^{3?3Dj}( zIK~d`=^pMQpztlA55iVM-l%lCzzw(nPwOQaA6Y#`+BYs(gZ0A8!kBHWgBtr$PqBjP ztWUYHc`BVdyNTjUxEkK!D(x9DxdW06=FFY*H^Ul?#iG^VKJoj>3J0l!Lrq*J1GfO! zG8KRZkV8en&7|a(Smx{vJwkgQOBVnZ<0`yLYNFN*T51kj1(NrwGGaV{Ke{zsNMvAa z0@9l~P+ySA0eeD+4Q+IY|;*)m4bqHVD?kT0n*{b!G$wV=Se+%MS17W);m%g*=Xns zw4JYT$X-~X9}qBO(NvEtFNlhfmdr`wWqeLexj3q{G;|Bf&}Cp@RUD4pS=588iSK(b zHs;r|$8`qUV33;U`P&z1a_=sh{*#pTDPaPUT>SitG&uz^7$c$m0Kb0%)(A-YG=~^m z71{(o7EY|gcyd#jEfaxDWBKwm6?W0A;h~W&g4?8EHF00Ycmjuhak+vJ1Z2z^%$+$B zmz5E^<$RAoq}m{#b$wXP)VovyV;&ms~Fz$KXZ7)y9*>MS^-l~g{j9Gl8f z7t7H?oRPjR-jQwBBH5u}J;1`XFXEbalZ_?AXUMpPeIN{&nrW8$Hg$J-?Q$BSjLmcr zNxR^t)T&$43Iu%NV4OTc&xPmw>@`jBPTmx5K?Js$PB|8Sk98U?wDnc2Rx2HwDAPg^ zYT+!{+-ks8&o>$LfY&*HiPb*Abh6Rf+s zjT&#;fBu%tMBJjM*3#Lpze@Gr|BiK7*hj!^iU6O`C*7@MaT*2F&4sjJFmJ~cMb6u4TS^8g?39+N7vD+ml&oAxPOd`4IV{BF>> z6iN^qxzY{Re(2{klS;RQG_Hw@n1kF(oHw`}r7A$S9LAz!?KCNI?X8`(_TqJFk$Sr9 zQQ>iey!e~MAGlYdp~V<`Ff-25EJt}xSX2jR=%EKE0fhl^=P|Cq0xIvbRWv;=j5svH z$>Mlsm9;g4a2rhS-Al8F_o)L!C0DLqvej#TgOp-Q8&7a8#@q))xn9Q3yy{AeQV?ps z1Y{LIb1K13A3cOAup(6^`}`@wrVngzsX#n41Yx9nDx6Q?CFu`!m$3p0H*C6cE7fk_AZY?DcH$5?PT+L)XF@Dn zETm(LV6r$1tb+kUNt(bp7oLzA0CNtFDVpz8@*?5z>5mf?4zc|mAO|x-tHR?LfB|J< zkZOC@p8;P2(Vauc#3^FpFst;>`G z%~jtZEM~o%k(Zha2%z-mI&-|aECdsZZj7r*-y3|Xkx#7_N`4hSW>yhX@ic$&GMz%R zR!Fwd$PDINV+wCE1Xi}G(8ZL%&0-EVtH`MZv-rUBX9Uq$<4TiSVz;6sBd3qR1DF)Rn^6Vh0u1z@gj9Z0Ksr`0i{Q8e;R0QSF^kR< zg3`4bP3ECS3G%+Kq@V7ROhB1$`nWXRmJMhIxRv%{^2Uh-qL7Wg@PLw>aW&!|oU=t@ zt*ziBg_BxBgCG8$>ZB5S?TnS#L$ak7Fo+grdEFvqxTs1<)8H{5Zq}j#WB+CT3R91Y zfvR2Vh89NBfGUyYPg-QywBRl)T36iP0cLGoX5fnTiN>6B;gdL|RW)z2ND!Cce4Ul& zz(-fAWuMOxpq+J3Aw+edB1D17+_Y|MSOJr5fNIf0H>2_S&Qf4M?bB?~oOE;asQUi# zInN*cO}C1L7W?G!7Z)$ZT8jo*Hw%>mm>>iPG+@^BO$MuQ&`RP>grZ}xyUrcT)bA1} zp;!bd*xk#QQu7;_t)=S0TNlt-h;9r_V(%2#hj6hzu<3NdUd!4bZ7gPUoZzn*C9HJ| zC?Hg5R5&8e7r#R}z{8Xal?gAcq0x`njm{P33&u2}qXTgr__Ppi$f49$*&=>m0>yxs z=6DTD_0pODr@eFkjq9l5_}#bdIH8Gc(#Ce%JZKs!5Wj*HfrR+$5kg2sMM4cyX+hAm zdHAum_hCMt@9a$?T7IItSxYm?dUx;MJ9FloGv_sDW;kVUFLRzApW6fOEkdVfoB&57 zgskX0ivc@iA&v2b#o-Bx=aOi`5n*3<-!&cmoYx#r$bGbq@r;fb>&iHJ$~7rIr^#yM zAe)KOr60P8x0RI(kYzDfTgHG|yk0YzZnelau9%d>NN$h;VPqz=4nQ4NC<9&vfMwzV zK2azLBv*RM@JXX%U=0X0^oilSH_9^>h=f0eB&dJK|Vq_u96v7nG}dxb<}2|LL!dLqW%*kyyY66{n- z+3Ul}<7_#7VHYSZ#qXB=BaiDT`vR6JFga2;`Kb>GrkAXuz z)sptya_QK?Ku~yU+5dMbJT1ubL@9o5UyfH_iYg5?kA9t#Ni$-iTg7HS<^Q-h(APCL zDo#x?X&i4~Dcdu$`&EexY0 zq6){`c+MDqG!Z#*42@B-Wj`hQX4asxL<3B9s~nlb7t1K$<%VUnvZ!-Rj}?#-!w#yg zbA%_~C0K+y_vR>3m}rVd)f$7nGRYIg08%_HNg<@$=Psr}7$Zf$qCJMxc>Lu{G&{m^ zKv?CH`1JW-nHYP@Ruot!P?3|_6tWIvnOhG*2I42J2b7C~-_{k_>JE$dIk_a_wB)#* zYuqJ6F-az|5X0$@@R)Kbq!uT2y=Ll~4B_G+E~PI?&-DBS+MeLDc5thwq=z>D{+dZT zRuTX4Pt7$#CUdT#bHcZI;giyq3%Bo7s=o*xOBC& z3Zi|t-uA_ZC}<~SUEiYuJAoG>yx;?Wcd|W39(6g^xHSr73vYMG+?NMqYaJDc+j+w1%`b0XxW_V$o+f*_bNs? 
z-Tu-qy9ds+MO|~mm5f-opD8dyLGCxSXXcOw8`J1)ld;u&%Y4}pA)q6az}A-O+$d<~ z5?s?{YVSFCf6D8E@SfYuxOMu7=Twp;Y1!yONFP&49pEy(Zm$vI2wy&L zDx8$nPCok-44@$9R)_@y@O;_9lBytvaYHjNbT?05JZIVZXU#u9{d4mrhmG7qsZ3oT zfwte=-Dx`P1vDWx?PO1^dk7qUp-qd-;&<&}jS~f?mxMvfD;Xqw_qSNmJZ|BU5RV$T z$4RbIm-*9$4@>>=ptzUn_+w}WnZVOMcQD2B?cM#5^u&|q#7*w7AV2^8AL5B9WMy!{ z?9+JJc7eXpz5-)}5EnkiXqd@NmI*HJ#>0=W(my69%D(=DyxhjbxiD9QtKb}9T{}&^ z4hrY{xI+J=SJ)Cj{jJ>Z@AYxEk%dq2jqWk_T7N>o!=9k{Jf(xMahPJVAPXm0%c7tQSlkH~PLeibCwgAB5D zJml#J1$2z{<>a;tEFDa%D25n{>SzTSsy_mo$*yTZQ1$7O%DMe$G9MYmQ->kFn7{=cGJ9 z1$J9D9a;auHVu(pp!BBj^)b8%7etBl^|}#|XBbSz*fkme#AXAt3c~QL^ca*U@{>HRL8|Pb1}<`s@M0px@>xJmP#^78X)}=0gNli(paglS*P4MH#%ZqAMrkmn;DVv6h7b)Xu`} zFzPvBfjOBKPGk)cGI(mSY!yykD@+NgsmlSVCGOyn_@?%m8FS$(Lp1H~b@pvS3m;>l z+eX(Fnyj7Zzk8<{JbKhz+&Ltt{(90*W#S_2O1&90LWRIAE~w1Je+Vjz2#;At@iu_H z&1#Z~?2fR>XTFDYywA)y;(|g>CiS}K#!_jlE`{q$&2cRJ8^l|kBk}aOv%mkXnGnCd zf#5S{ia!NM0f%+)+F;PR1@l>O1unF{oxk87*y)$ z-)Ww~=*Eo=J#0K6ljHCZrBButvG7wAD3@u&p@>%y4-6c6kz)F)3fR!Jb9YncO9;Dt z%Xee;9lbN2P4<-D$q-y%h00G_mGams%5-4}fQA(LpoH0kT~CUz5ii#jjLBr1d7>EJ zz?Aevy|HTvlS)K73k^+Hz~m>64jsYW*xdqD^zeyI%up`!rYnSf#B&;45iU*LG|uyv zJ<~gOu->qgWekbuGy#1xr^X%f^hn{!U9W__^sp0@_TA4dUE2HF@2QmE0Wa_c>%id{ zpaLG`7cTm{eCv2isxsFwY2aTnsyNQ{;zxuDw^-xE3U92ogkrV)g+a4Kw8$yWD`;>E zv@m3HnFb2ZxryuiO|gHfYM=Ms)E7?}dH_cREOn`9+NW|4%F!{akz|&nhK5Sw7+B`6 z_J98a!kDCCGtS(k&v9k=$&j^&a=m1ZY%=hmB_@`6c8oW4!q}W3ljIlY<0dHXbNA#h zoWwt+WU!-S=1d4y9fXYoX*iv2hJ%P zv_Ww9KPKYGfHqTM#$o6pRf3yoRIHo@azM1JDV;Nr5T}#VW?PF$Mv_2S;DcR`qC_Be zNo%}*lLJqPLpL03KeE_(V}5>C zczQ#W6F0+p`S&Kd6rx<@#k)lHqk@zX>hmg;w7_6LGhPfVeKIZvAKoMDhRN&?-_K69 zZqTLRR}m3|K=5g46Se^^bHgXB_HzzEMlZvh5Cm1b^+-c3^mLbncV>Rjo-S<$iOi2s z0j$7~NjmW1^YI+0Pmd=M(yY&DojO1byG6k(4f)NsdNBCLZItgV++t`vV-PsGc!H2~ z?Min*8s$w=0z)Twp1RHzsv^7=OKtWWV~#I{7vVP*Q!B{iDfpi^#=aZt0rT#A z&5cif){G9BYr)8v@7*S$fOJoc99Q0Zj)~)6g;K_-K8Wx+=IDty@tZiw*A^1_hqmau zyaluH4ZeccvI{_-@wqZ4yf^oAulMjLa?6zXZ2iN0hlxg)!cnRDB^%@05mY$4Ub((+`Sg}IM0e; zg&FMb1a`a)a-!9~o|#O}0P_=M;7B-P_T9Vp$($fwM_A^BeCZB5BTmR58lx=65cXWF zOd&0rgLv4C%pI*{=ojqE5EjN*j2}5ZDg+p1M;|kc<=#ZHQO=Jjl|{g5-D!Im=Tweq zwOI_;gdx33U814v?L^8eetDP4=OJ!s_i#Tu&$^*kVz|_8%(7VUg2+F#?up?TJcvLG zPrKiY3}qsK>2DrAnl%VCR5FUNS`^YepZO0zS{xAR2>{tI2c3QAxr8l*75qB*^#XaE z-^i^3CbK+dcz#;zVrR;%;e^4Vm4~@R$j^J6cyt$YM|W(j^6Sz$*T22Oi4uOs^hq%FXuN_sCFH#5!?iaa@cR zuPh=1JjN~IC+7x)MMz#H34rjn?Mp*S>1#1qeX{Tu_k&Nn0qgUJ>wM93&UrA%ojeAE zTECf(ZuV*T%|w(K0V)U{mS-(l7|5KFTX<;r=`{%$Wal^P2`7sWo<=5Jh!~!AV%?+y z;WCSn361hBAJaF+o*qqZ=I;+(R;nCCP6{0t@vc)an*B;apd3cKEF!rnau9$Z&xIbb zdLTkM1r7v?6rMk0pm?yxG4btVw$i>X3* ziNu3IB0fw3XbLzBRdA=_Bn7QP&Ue~{!R2v7N|uOK`;vS3Krz%)ex-l@2CSJZ%dfd3 z^@TeSuz6PZk~WhWMFZBv-ed439t{g`x?QZz{0^f4v$HQ4Dd2@cK@F~TP=K7F9J2rd zJUeNh`hdm>Str+`@aV54F!9-}uPA5k$?l~}EJ~{c@`b@-FU1pgMfnM&G*lot_*9@d zb61!c#jfhiS`vi8g%j;yYe2XshY&HSx+rWr<@O$y0anw6VKi`Ic)$TqmxBv`SHPIr zm056L)Y=kzv9P5JcN&heB%dB*A>joOmC4WivYJ2QB|Idt{9=E^ow(pzm&IqDZTbTL zdR^~H6d%!lt$M=+Dt^O93Pu&No`qhe^it`oSWSBrC-S6wcb$7q2qCa+j!o>%c! 
literal 0
HcmV?d00001

From b9d3e8eeec88d9522d2f391e4e50346e6e5cee39 Mon Sep 17 00:00:00 2001
From: WenmuZhou
Date: Thu, 4 Feb 2021 11:33:48 +0800
Subject: [PATCH 46/77] add dataset len check

---
 tools/program.py | 5 +++++
 tools/train.py   | 5 +++++
 2 files changed, 10 insertions(+)

diff --git a/tools/program.py b/tools/program.py
index 99a37432..34d484d8 100755
--- a/tools/program.py
+++ b/tools/program.py
@@ -163,6 +163,11 @@ def train(config,
     if type(eval_batch_step) == list and len(eval_batch_step) >= 2:
         start_eval_step = eval_batch_step[0]
         eval_batch_step = eval_batch_step[1]
+        if len(valid_dataloader) == 0:
+            logger.info(
+                'No Images in eval dataset, evaluation during training will be disabled'
+            )
+            start_eval_step = 1e111
         logger.info(
             "During the training process, after the {}th iteration, an evaluation is run every {} iterations".
format(start_eval_step, eval_batch_step)) diff --git a/tools/train.py b/tools/train.py index 383f8d83..a88d64c2 100755 --- a/tools/train.py +++ b/tools/train.py @@ -50,6 +50,11 @@ def main(config, device, logger, vdl_writer): # build dataloader train_dataloader = build_dataloader(config, 'Train', device, logger) + if len(train_dataloader) == 0: + logger.error( + 'No Images in train dataset, please check annotation file and path in the configuration file' + ) + if config['Eval']: valid_dataloader = build_dataloader(config, 'Eval', device, logger) else: From 3b1703e3eb30ef2a2610a34bd7f554f363113a74 Mon Sep 17 00:00:00 2001 From: WenmuZhou Date: Thu, 4 Feb 2021 12:01:56 +0800 Subject: [PATCH 47/77] add dataset len check --- tools/train.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/train.py b/tools/train.py index a88d64c2..fab10b64 100755 --- a/tools/train.py +++ b/tools/train.py @@ -54,6 +54,7 @@ def main(config, device, logger, vdl_writer): logger.error( 'No Images in train dataset, please check annotation file and path in the configuration file' ) + return if config['Eval']: valid_dataloader = build_dataloader(config, 'Eval', device, logger) From b97ff21324bedd73a3303c247df88b4204cfddf8 Mon Sep 17 00:00:00 2001 From: WenmuZhou Date: Thu, 4 Feb 2021 19:51:44 +0800 Subject: [PATCH 48/77] update readme and requirements.txt --- PPOCRLabel/README.md | 12 ++++++------ PPOCRLabel/README_ch.md | 12 ++++++------ requirements.txt | 3 ++- 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/PPOCRLabel/README.md b/PPOCRLabel/README.md index 41a7ab44..1df6c223 100644 --- a/PPOCRLabel/README.md +++ b/PPOCRLabel/README.md @@ -9,7 +9,7 @@ PPOCRLabel is a semi-automatic graphic annotation tool suitable for OCR field, w ### Recent Update - 2021.1.11: Optimize the labeling experience (by [edencfc](https://github.com/edencfc)), - - Users can choose whether to pop up the label input dialog after drawing the detection box in "View - Pop-up Label Input Dialog". + - Users can choose whether to pop up the label input dialog after drawing the detection box in "View - Pop-up Label Input Dialog". - The recognition result scrolls synchronously when users click related detection box. - Click to modify the recognition result.(If you can't change the result, please switch to the system default input method, or switch back to the original input method again) - 2020.12.18: Support re-recognition of a single label box (by [ninetailskim](https://github.com/ninetailskim) ), perfect shortcut keys. @@ -49,7 +49,7 @@ python3 PPOCRLabel.py ``` pip3 install pyqt5 pip3 uninstall opencv-python # Uninstall opencv manually as it conflicts with pyqt -pip3 install opencv-contrib-python-headless # Install the headless version of opencv +pip3 install opencv-contrib-python-headless==4.2.0.32 # Install the headless version of opencv cd ./PPOCRLabel # Change the directory to the PPOCRLabel folder python3 PPOCRLabel.py ``` @@ -127,7 +127,7 @@ Therefore, if the recognition result has been manually changed before, it may ch - Default model: PPOCRLabel uses the Chinese and English ultra-lightweight OCR model in PaddleOCR by default, supports Chinese, English and number recognition, and multiple language detection. -- Model language switching: Changing the built-in model language is supportable by clicking "PaddleOCR"-"Choose OCR Model" in the menu bar. Currently supported languages​include French, German, Korean, and Japanese. 
+- Model language switching: Changing the built-in model language is supportable by clicking "PaddleOCR"-"Choose OCR Model" in the menu bar. Currently supported languages​include French, German, Korean, and Japanese. For specific model download links, please refer to [PaddleOCR Model List](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_en/models_list_en.md#multilingual-recognition-modelupdating) - Custom model: The model trained by users can be replaced by modifying PPOCRLabel.py in [PaddleOCR class instantiation](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/PPOCRLabel/PPOCRLabel.py#L110) referring [Custom Model Code](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/doc/doc_en/whl_en.md#use-custom-model) @@ -160,11 +160,11 @@ For some data that are difficult to recognize, the recognition results will not ``` pyrcc5 -o libs/resources.py resources.qrc ``` -- If you get an error ``` module 'cv2' has no attribute 'INTER_NEAREST'```, you need to delete all opencv related packages first, and then reinstall the headless version of opencv +- If you get an error ``` module 'cv2' has no attribute 'INTER_NEAREST'```, you need to delete all opencv related packages first, and then reinstall the 4.2.0.32 version of headless opencv ``` - pip install opencv-contrib-python-headless + pip install opencv-contrib-python-headless==4.2.0.32 ``` - + ### Related 1.[Tzutalin. LabelImg. Git code (2015)](https://github.com/tzutalin/labelImg) diff --git a/PPOCRLabel/README_ch.md b/PPOCRLabel/README_ch.md index df4f7dfa..b9bfc9e0 100644 --- a/PPOCRLabel/README_ch.md +++ b/PPOCRLabel/README_ch.md @@ -49,7 +49,7 @@ python3 PPOCRLabel.py --lang ch ``` pip3 install pyqt5 pip3 uninstall opencv-python # 由于mac版本的opencv与pyqt有冲突,需先手动卸载opencv -pip3 install opencv-contrib-python-headless # 安装headless版本的open-cv +pip3 install opencv-contrib-python-headless==4.2.0.32 # 安装headless版本的open-cv cd ./PPOCRLabel # 将目录切换到PPOCRLabel文件夹下 python3 PPOCRLabel.py --lang ch ``` @@ -132,22 +132,22 @@ PPOCRLabel支持三种保存方式: ### 错误提示 - 如果同时使用whl包安装了paddleocr,其优先级大于通过paddleocr.py调用PaddleOCR类,whl包未更新时会导致程序异常。 - + - PPOCRLabel**不支持对中文文件名**的图片进行自动标注。 - 针对Linux用户:如果您在打开软件过程中出现**objc[XXXXX]**开头的错误,证明您的opencv版本太高,建议安装4.2版本: ``` pip install opencv-python==4.2.0.32 ``` - + - 如果出现 ```Missing string id``` 开头的错误,需要重新编译资源: ``` pyrcc5 -o libs/resources.py resources.qrc ``` - -- 如果出现``` module 'cv2' has no attribute 'INTER_NEAREST'```错误,需要首先删除所有opencv相关包,然后重新安装headless版本的opencv + +- 如果出现``` module 'cv2' has no attribute 'INTER_NEAREST'```错误,需要首先删除所有opencv相关包,然后重新安装4.2.0.32版本的headless opencv ``` - pip install opencv-contrib-python-headless + pip install opencv-contrib-python-headless==4.2.0.32 ``` ### 参考资料 diff --git a/requirements.txt b/requirements.txt index 13218963..2401d52b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,6 @@ shapely -imgaug +scikit-image==0.17.2 +imgaug==0.4.0 pyclipper lmdb opencv-python==4.2.0.32 From 4a8fc9176d16602a61c85cfa09cabf2c934f34a5 Mon Sep 17 00:00:00 2001 From: light1003 Date: Fri, 5 Feb 2021 15:49:01 +0800 Subject: [PATCH 49/77] Update det_basic_loss.py to_variable has been removed in paddle 2.0,using to_tensor instead --- ppocr/losses/det_basic_loss.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ppocr/losses/det_basic_loss.py b/ppocr/losses/det_basic_loss.py index 57b3667d..eba5526d 100644 --- a/ppocr/losses/det_basic_loss.py +++ b/ppocr/losses/det_basic_loss.py @@ -200,6 +200,6 @@ def ohem_batch(scores, gt_texts, training_masks, ohem_ratio): i, :, :], 
ohem_ratio)) selected_masks = np.concatenate(selected_masks, 0) - selected_masks = paddle.to_variable(selected_masks) + selected_masks = paddle.to_tensor(selected_masks) return selected_masks From 3b5360dcd927f8516014619e44a888f56b0f6c72 Mon Sep 17 00:00:00 2001 From: WenmuZhou Date: Fri, 5 Feb 2021 16:22:32 +0800 Subject: [PATCH 50/77] update rare dataset path --- configs/rec/rec_mv3_tps_bilstm_att.yml | 4 ++-- configs/rec/rec_r34_vd_tps_bilstm_att.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/configs/rec/rec_mv3_tps_bilstm_att.yml b/configs/rec/rec_mv3_tps_bilstm_att.yml index 0ce06734..3cf1f7ab 100644 --- a/configs/rec/rec_mv3_tps_bilstm_att.yml +++ b/configs/rec/rec_mv3_tps_bilstm_att.yml @@ -66,7 +66,7 @@ Metric: Train: dataset: name: LMDBDataSet - data_dir: ../training/ + data_dir: ./train_data/data_lmdb_release/training/ transforms: - DecodeImage: # load image img_mode: BGR @@ -85,7 +85,7 @@ Train: Eval: dataset: name: LMDBDataSet - data_dir: ../validation/ + data_dir: ./train_data/data_lmdb_release/validation/ transforms: - DecodeImage: # load image img_mode: BGR diff --git a/configs/rec/rec_r34_vd_tps_bilstm_att.yml b/configs/rec/rec_r34_vd_tps_bilstm_att.yml index 02aeb8c5..659a1723 100644 --- a/configs/rec/rec_r34_vd_tps_bilstm_att.yml +++ b/configs/rec/rec_r34_vd_tps_bilstm_att.yml @@ -65,7 +65,7 @@ Metric: Train: dataset: name: LMDBDataSet - data_dir: ../training/ + data_dir: ./train_data/data_lmdb_release/training/ transforms: - DecodeImage: # load image img_mode: BGR @@ -84,7 +84,7 @@ Train: Eval: dataset: name: LMDBDataSet - data_dir: ../validation/ + data_dir: ./train_data/data_lmdb_release/validation/ transforms: - DecodeImage: # load image img_mode: BGR From 4410b783404f85e01ae644a204d1b9d2370f6ee6 Mon Sep 17 00:00:00 2001 From: WenmuZhou Date: Fri, 5 Feb 2021 16:28:30 +0800 Subject: [PATCH 51/77] update srn dataset path --- configs/rec/rec_r50_fpn_srn.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/configs/rec/rec_r50_fpn_srn.yml b/configs/rec/rec_r50_fpn_srn.yml index ec7f1705..6b38616c 100644 --- a/configs/rec/rec_r50_fpn_srn.yml +++ b/configs/rec/rec_r50_fpn_srn.yml @@ -59,7 +59,7 @@ Metric: Train: dataset: name: LMDBDataSet - data_dir: ./train_data/srn_train_data_duiqi + data_dir: ./train_data/data_lmdb_release/training/ transforms: - DecodeImage: # load image img_mode: BGR @@ -84,7 +84,7 @@ Train: Eval: dataset: name: LMDBDataSet - data_dir: ./train_data/data_lmdb_release/evaluation + data_dir: ./train_data/data_lmdb_release/validation/ transforms: - DecodeImage: # load image img_mode: BGR From ff6b193a6103601aab8bf382ce9d66fdf72074de Mon Sep 17 00:00:00 2001 From: tink2123 Date: Sun, 7 Feb 2021 07:31:24 +0000 Subject: [PATCH 52/77] polish code for srn eval --- tools/program.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/tools/program.py b/tools/program.py index 99a37432..3dc85500 100755 --- a/tools/program.py +++ b/tools/program.py @@ -177,6 +177,8 @@ def train(config, model_average = False model.train() + use_srn = config['Architecture']['algorithm'] == "SRN" + if 'start_epoch' in best_model_dict: start_epoch = best_model_dict['start_epoch'] else: @@ -195,7 +197,7 @@ def train(config, break lr = optimizer.get_lr() images = batch[0] - if config['Architecture']['algorithm'] == "SRN": + if use_srn: others = batch[-4:] preds = model(images, others) model_average = True @@ -251,8 +253,12 @@ def train(config, min_average_window=10000, max_average_window=15625) 
Model_Average.apply() - cur_metric = eval(model, valid_dataloader, post_process_class, - eval_class) + cur_metric = eval( + model, + valid_dataloader, + post_process_class, + eval_class, + use_srn=use_srn) cur_metric_str = 'cur metric, {}'.format(', '.join( ['{}: {}'.format(k, v) for k, v in cur_metric.items()])) logger.info(cur_metric_str) @@ -316,7 +322,8 @@ def train(config, return -def eval(model, valid_dataloader, post_process_class, eval_class): +def eval(model, valid_dataloader, post_process_class, eval_class, + use_srn=False): model.eval() with paddle.no_grad(): total_frame = 0.0 @@ -327,7 +334,8 @@ def eval(model, valid_dataloader, post_process_class, eval_class): break images = batch[0] start = time.time() - if "SRN" in str(model.head): + + if use_srn: others = batch[-4:] preds = model(images, others) else: From 3728183276617ac5f8e27fbabfc5862cb1d5694f Mon Sep 17 00:00:00 2001 From: littletomatodonkey Date: Sun, 7 Feb 2021 08:09:39 +0000 Subject: [PATCH 53/77] fix are calc for polygon --- ppocr/data/imaug/make_shrink_map.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/ppocr/data/imaug/make_shrink_map.py b/ppocr/data/imaug/make_shrink_map.py index a66706f2..8e612235 100644 --- a/ppocr/data/imaug/make_shrink_map.py +++ b/ppocr/data/imaug/make_shrink_map.py @@ -84,11 +84,12 @@ class MakeShrinkMap(object): return polygons, ignore_tags def polygon_area(self, polygon): - # return cv2.contourArea(polygon.astype(np.float32)) - edge = 0 - for i in range(polygon.shape[0]): - next_index = (i + 1) % polygon.shape[0] - edge += (polygon[next_index, 0] - polygon[i, 0]) * ( - polygon[next_index, 1] - polygon[i, 1]) - - return edge / 2. + """ + compute polygon area + """ + area = 0 + q = polygon[-1] + for p in polygon: + area += p[0] * q[1] - p[1] * q[0] + q = p + return area / 2.0 From 4b845ca159bbe720ca31862b808244f56f6e8e5c Mon Sep 17 00:00:00 2001 From: tink2123 Date: Mon, 8 Feb 2021 03:30:27 +0000 Subject: [PATCH 54/77] fix attn encode --- ppocr/data/imaug/label_ops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ppocr/data/imaug/label_ops.py b/ppocr/data/imaug/label_ops.py index 55870a50..fd6ba646 100644 --- a/ppocr/data/imaug/label_ops.py +++ b/ppocr/data/imaug/label_ops.py @@ -215,7 +215,7 @@ class AttnLabelEncode(BaseRecLabelEncode): return None data['length'] = np.array(len(text)) text = [0] + text + [len(self.character) - 1] + [0] * (self.max_text_len - - len(text) - 1) + - len(text) - 2) data['label'] = np.array(text) return data From 3d8b42dc0c39fd3ab26251c9de21a1c0d414a7ef Mon Sep 17 00:00:00 2001 From: tink2123 Date: Mon, 8 Feb 2021 03:21:01 +0000 Subject: [PATCH 55/77] fix encode for srn --- ppocr/data/imaug/label_ops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ppocr/data/imaug/label_ops.py b/ppocr/data/imaug/label_ops.py index fd6ba646..7a32d870 100644 --- a/ppocr/data/imaug/label_ops.py +++ b/ppocr/data/imaug/label_ops.py @@ -261,7 +261,7 @@ class SRNLabelEncode(BaseRecLabelEncode): if len(text) > self.max_text_len: return None data['length'] = np.array(len(text)) - text = text + [char_num] * (self.max_text_len - len(text)) + text = text + [char_num - 1] * (self.max_text_len - len(text)) data['label'] = np.array(text) return data From b658e10b35352648e7f27e3bd741f8485e992875 Mon Sep 17 00:00:00 2001 From: tink2123 Date: Mon, 8 Feb 2021 05:41:23 +0000 Subject: [PATCH 56/77] fix srn for eval --- tools/eval.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/eval.py 
b/tools/eval.py index 16cfe532..4afed469 100755 --- a/tools/eval.py +++ b/tools/eval.py @@ -47,6 +47,7 @@ def main(): config['Architecture']["Head"]['out_channels'] = len( getattr(post_process_class, 'character')) model = build_model(config['Architecture']) + use_srn = config['Architecture']['algorithm'] == "SRN" best_model_dict = init_model(config, model, logger) if len(best_model_dict): @@ -59,7 +60,7 @@ def main(): # start eval metirc = program.eval(model, valid_dataloader, post_process_class, - eval_class) + eval_class, use_srn) logger.info('metric eval ***************') for k, v in metirc.items(): logger.info('{}:{}'.format(k, v)) From 4eb351866c1bda9db33c77c93e6fdc0164c9ee3a Mon Sep 17 00:00:00 2001 From: WenmuZhou Date: Mon, 8 Feb 2021 14:05:48 +0800 Subject: [PATCH 57/77] predict_rec support rare --- tools/infer/predict_rec.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools/infer/predict_rec.py b/tools/infer/predict_rec.py index fd895e50..b3d9d490 100755 --- a/tools/infer/predict_rec.py +++ b/tools/infer/predict_rec.py @@ -54,6 +54,13 @@ class TextRecognizer(object): "character_dict_path": args.rec_char_dict_path, "use_space_char": args.use_space_char } + elif self.rec_algorithm == "RARE": + postprocess_params = { + 'name': 'AttnLabelDecode', + "character_type": args.rec_char_type, + "character_dict_path": args.rec_char_dict_path, + "use_space_char": args.use_space_char + } self.postprocess_op = build_post_process(postprocess_params) self.predictor, self.input_tensor, self.output_tensors = \ utility.create_predictor(args, 'rec', logger) From ed2b527c9ff185425b7ea42a6f88926294899e52 Mon Sep 17 00:00:00 2001 From: WenmuZhou Date: Mon, 8 Feb 2021 14:06:10 +0800 Subject: [PATCH 58/77] fix rare export error --- ppocr/modeling/heads/rec_att_head.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ppocr/modeling/heads/rec_att_head.py b/ppocr/modeling/heads/rec_att_head.py index a7cfe128..59e26c1e 100644 --- a/ppocr/modeling/heads/rec_att_head.py +++ b/ppocr/modeling/heads/rec_att_head.py @@ -146,6 +146,9 @@ class AttentionLSTM(nn.Layer): else: targets = paddle.zeros(shape=[batch_size], dtype="int32") probs = None + char_onehots = None + outputs = None + alpha = None for i in range(num_steps): char_onehots = self._char_to_onehot( From 33f302e5ff0e2b893a9b4d1c712e98fe3e5e5d84 Mon Sep 17 00:00:00 2001 From: WenmuZhou Date: Tue, 9 Feb 2021 13:36:38 +0800 Subject: [PATCH 59/77] support img_h < 32 --- ppocr/data/imaug/operators.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ppocr/data/imaug/operators.py b/ppocr/data/imaug/operators.py index 8b9175cf..eacfdf3b 100644 --- a/ppocr/data/imaug/operators.py +++ b/ppocr/data/imaug/operators.py @@ -185,8 +185,8 @@ class DetResizeForTest(object): resize_h = int(h * ratio) resize_w = int(w * ratio) - resize_h = int(round(resize_h / 32) * 32) - resize_w = int(round(resize_w / 32) * 32) + resize_h = max(int(round(resize_h / 32) * 32), 32) + resize_w = max(int(round(resize_w / 32) * 32), 32) try: if int(resize_w) <= 0 or int(resize_h) <= 0: From 0b53465894eb22174c96fed971f6433021efb93c Mon Sep 17 00:00:00 2001 From: Tingquan Gao <35441050@qq.com> Date: Tue, 9 Feb 2021 18:05:59 +0800 Subject: [PATCH 60/77] Adapt to paddle inference 2.0 (#1976) --- deploy/cpp_infer/CMakeLists.txt | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/deploy/cpp_infer/CMakeLists.txt b/deploy/cpp_infer/CMakeLists.txt index 11883367..120cf06a 100644 --- a/deploy/cpp_infer/CMakeLists.txt +++ 
b/deploy/cpp_infer/CMakeLists.txt @@ -133,7 +133,11 @@ if(WITH_MKL) endif () endif() else() - set(MATH_LIB ${PADDLE_LIB}/third_party/install/openblas/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX}) + if (WIN32) + set(MATH_LIB ${PADDLE_LIB}/third_party/install/openblas/lib/openblas${CMAKE_STATIC_LIBRARY_SUFFIX}) + else () + set(MATH_LIB ${PADDLE_LIB}/third_party/install/openblas/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX}) + endif () endif() # Note: libpaddle_inference_api.so/a must put before libpaddle_fluid.so/a @@ -157,7 +161,7 @@ endif(WITH_STATIC_LIB) if (NOT WIN32) set(DEPS ${DEPS} - ${MATH_LIB} ${MKLDNN_LIB} + ${MATH_LIB} ${MKLDNN_LIB} glog gflags protobuf z xxhash ) if(EXISTS "${PADDLE_LIB}/third_party/install/snappystream/lib") From 3ab2d46310b3f6222b4be44a09f92dcaa32971c2 Mon Sep 17 00:00:00 2001 From: zhoujun Date: Tue, 9 Feb 2021 18:44:40 +0800 Subject: [PATCH 61/77] support img_h < 32 (#1992) --- ppocr/data/imaug/operators.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ppocr/data/imaug/operators.py b/ppocr/data/imaug/operators.py index 8b9175cf..eacfdf3b 100644 --- a/ppocr/data/imaug/operators.py +++ b/ppocr/data/imaug/operators.py @@ -185,8 +185,8 @@ class DetResizeForTest(object): resize_h = int(h * ratio) resize_w = int(w * ratio) - resize_h = int(round(resize_h / 32) * 32) - resize_w = int(round(resize_w / 32) * 32) + resize_h = max(int(round(resize_h / 32) * 32), 32) + resize_w = max(int(round(resize_w / 32) * 32), 32) try: if int(resize_w) <= 0 or int(resize_h) <= 0: From ddf548ad5e4d27a53cdee842a3477e2226c575d9 Mon Sep 17 00:00:00 2001 From: MissPenguin Date: Thu, 18 Feb 2021 11:20:51 +0800 Subject: [PATCH 62/77] Update inference_en.md --- doc/doc_en/inference_en.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/doc_en/inference_en.md b/doc/doc_en/inference_en.md index c8ce1424..6b745619 100755 --- a/doc/doc_en/inference_en.md +++ b/doc/doc_en/inference_en.md @@ -147,7 +147,7 @@ The visual text detection results are saved to the ./inference_results folder by ![](../imgs_results/det_res_00018069.jpg) You can use the parameters `limit_type` and `det_limit_side_len` to limit the size of the input image, -The optional parameters of `litmit_type` are [`max`, `min`], and +The optional parameters of `limit_type` are [`max`, `min`], and `det_limit_size_len` is a positive integer, generally set to a multiple of 32, such as 960. The default setting of the parameters is `limit_type='max', det_limit_side_len=960`. 
Indicates that the longest side of the network input image cannot exceed 960, From 65e61f441c839b12a6573b7686620ad3b9ebbe9a Mon Sep 17 00:00:00 2001 From: MissPenguin Date: Thu, 18 Feb 2021 11:21:16 +0800 Subject: [PATCH 63/77] Update inference.md --- doc/doc_ch/inference.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/doc_ch/inference.md b/doc/doc_ch/inference.md index 0daddd9b..f0a8983c 100755 --- a/doc/doc_ch/inference.md +++ b/doc/doc_ch/inference.md @@ -140,7 +140,7 @@ python3 tools/infer/predict_det.py --image_dir="./doc/imgs/00018069.jpg" --det_m ![](../imgs_results/det_res_00018069.jpg) 通过参数`limit_type`和`det_limit_side_len`来对图片的尺寸进行限制, -`litmit_type`可选参数为[`max`, `min`], +`limit_type`可选参数为[`max`, `min`], `det_limit_size_len` 为正整数,一般设置为32 的倍数,比如960。 参数默认设置为`limit_type='max', det_limit_side_len=960`。表示网络输入图像的最长边不能超过960, From bbdbf598d4ef372409855b47d54bae7ccd54ce51 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Thu, 18 Feb 2021 18:42:47 +0800 Subject: [PATCH 64/77] keep dilation as False --- tools/infer/predict_det.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/infer/predict_det.py b/tools/infer/predict_det.py index 077692af..dec99642 100755 --- a/tools/infer/predict_det.py +++ b/tools/infer/predict_det.py @@ -64,7 +64,7 @@ class TextDetector(object): postprocess_params["box_thresh"] = args.det_db_box_thresh postprocess_params["max_candidates"] = 1000 postprocess_params["unclip_ratio"] = args.det_db_unclip_ratio - postprocess_params["use_dilation"] = True + postprocess_params["use_dilation"] = False elif self.det_algorithm == "EAST": postprocess_params['name'] = 'EASTPostProcess' postprocess_params["score_thresh"] = args.det_east_score_thresh From 5fc33c1213f6e3b40da030bb7d8ae73110649eb3 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Thu, 18 Feb 2021 18:45:53 +0800 Subject: [PATCH 65/77] keep dilation as False --- tools/infer/predict_det.py | 2 +- tools/infer/utility.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/infer/predict_det.py b/tools/infer/predict_det.py index dec99642..76c6a447 100755 --- a/tools/infer/predict_det.py +++ b/tools/infer/predict_det.py @@ -64,7 +64,7 @@ class TextDetector(object): postprocess_params["box_thresh"] = args.det_db_box_thresh postprocess_params["max_candidates"] = 1000 postprocess_params["unclip_ratio"] = args.det_db_unclip_ratio - postprocess_params["use_dilation"] = False + postprocess_params["use_dilation"] = args.use_dilation elif self.det_algorithm == "EAST": postprocess_params['name'] = 'EASTPostProcess' postprocess_params["score_thresh"] = args.det_east_score_thresh diff --git a/tools/infer/utility.py b/tools/infer/utility.py index e842e7b0..3b92c093 100755 --- a/tools/infer/utility.py +++ b/tools/infer/utility.py @@ -47,6 +47,7 @@ def parse_args(): parser.add_argument("--det_db_box_thresh", type=float, default=0.5) parser.add_argument("--det_db_unclip_ratio", type=float, default=1.6) parser.add_argument("--max_batch_size", type=int, default=10) + parser.add_argument("--use_dilation", type=bool, default=False) # EAST parmas parser.add_argument("--det_east_score_thresh", type=float, default=0.8) parser.add_argument("--det_east_cover_thresh", type=float, default=0.1) From bbd7665c471e682d664b7f1b086d9cf68a4bab20 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Thu, 18 Feb 2021 18:53:28 +0800 Subject: [PATCH 66/77] fix commit --- doc/doc_ch/installation.md | 2 +- doc/doc_en/inference_en.md | 2 +- doc/doc_en/installation_en.md | 2 +- tools/infer/predict_det.py | 2 +- 
tools/infer/utility.py | 1 + 5 files changed, 5 insertions(+), 4 deletions(-) diff --git a/doc/doc_ch/installation.md b/doc/doc_ch/installation.md index 95a7a52c..7e7523b9 100644 --- a/doc/doc_ch/installation.md +++ b/doc/doc_ch/installation.md @@ -30,7 +30,7 @@ sudo nvidia-docker run --name ppocr -v $PWD:/paddle --shm-size=64G --network=hos sudo docker container exec -it ppocr /bin/bash ``` -**2. 安装PaddlePaddle v2.0** +**2. 安装PaddlePaddle 2.0** ``` pip3 install --upgrade pip diff --git a/doc/doc_en/inference_en.md b/doc/doc_en/inference_en.md index 48eaaa5a..aa3e0536 100755 --- a/doc/doc_en/inference_en.md +++ b/doc/doc_en/inference_en.md @@ -6,7 +6,7 @@ The inference model (the model saved by `paddle.jit.save`) is generally a solidi The model saved during the training process is the checkpoints model, which saves the parameters of the model and is mostly used to resume training. Compared with the checkpoints model, the inference model will additionally save the structural information of the model. Therefore, it is easier to deploy because the model structure and model parameters are already solidified in the inference model file, and is suitable for integration with actual systems. -For more details, please refer to the document [Classification Framework](https://github.com/PaddlePaddle/PaddleClas/blob/master/docs/zh_CN/extension/paddle_inference.md). +For more details, please refer to the document [Classification Framework](https://github.com/PaddlePaddle/PaddleClas/blob/release%2F2.0/docs/zh_CN/extension/paddle_mobile_inference.md). Next, we first introduce how to convert a trained model into an inference model, and then we will introduce text detection, text recognition, angle class, and the concatenation of them based on inference model. diff --git a/doc/doc_en/installation_en.md b/doc/doc_en/installation_en.md index ad29aa23..dec384b2 100644 --- a/doc/doc_en/installation_en.md +++ b/doc/doc_en/installation_en.md @@ -33,7 +33,7 @@ You can also visit [DockerHub](https://hub.docker.com/r/paddlepaddle/paddle/tags sudo docker container exec -it ppocr /bin/bash ``` -**2. Install PaddlePaddle v2.0** +**2. 
Install PaddlePaddle 2.0** ``` pip3 install --upgrade pip diff --git a/tools/infer/predict_det.py b/tools/infer/predict_det.py index 76c6a447..077692af 100755 --- a/tools/infer/predict_det.py +++ b/tools/infer/predict_det.py @@ -64,7 +64,7 @@ class TextDetector(object): postprocess_params["box_thresh"] = args.det_db_box_thresh postprocess_params["max_candidates"] = 1000 postprocess_params["unclip_ratio"] = args.det_db_unclip_ratio - postprocess_params["use_dilation"] = args.use_dilation + postprocess_params["use_dilation"] = True elif self.det_algorithm == "EAST": postprocess_params['name'] = 'EASTPostProcess' postprocess_params["score_thresh"] = args.det_east_score_thresh diff --git a/tools/infer/utility.py b/tools/infer/utility.py index 3b92c093..a4a91efd 100755 --- a/tools/infer/utility.py +++ b/tools/infer/utility.py @@ -124,6 +124,7 @@ def create_predictor(args, mode, logger): # cache 10 different shapes for mkldnn to avoid memory leak config.set_mkldnn_cache_capacity(10) config.enable_mkldnn() + # TODO LDOUBLEV: fix mkldnn bug when bach_size > 1 #config.set_mkldnn_op({'conv2d', 'depthwise_conv2d', 'pool2d', 'batch_norm'}) args.rec_batch_num = 1 From cf550d052469e11bff6d1d88a9e473c45df5d21e Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Thu, 18 Feb 2021 19:00:30 +0800 Subject: [PATCH 67/77] set dilation as False --- tools/infer/predict_det.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/infer/predict_det.py b/tools/infer/predict_det.py index 077692af..76c6a447 100755 --- a/tools/infer/predict_det.py +++ b/tools/infer/predict_det.py @@ -64,7 +64,7 @@ class TextDetector(object): postprocess_params["box_thresh"] = args.det_db_box_thresh postprocess_params["max_candidates"] = 1000 postprocess_params["unclip_ratio"] = args.det_db_unclip_ratio - postprocess_params["use_dilation"] = True + postprocess_params["use_dilation"] = args.use_dilation elif self.det_algorithm == "EAST": postprocess_params['name'] = 'EASTPostProcess' postprocess_params["score_thresh"] = args.det_east_score_thresh From 9291c87271698ab18ebd5e4a7749fa487e9cdb42 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Thu, 18 Feb 2021 19:03:52 +0800 Subject: [PATCH 68/77] keep postprocess params same --- deploy/cpp_infer/tools/config.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/cpp_infer/tools/config.txt b/deploy/cpp_infer/tools/config.txt index e185377e..24e4ef0d 100644 --- a/deploy/cpp_infer/tools/config.txt +++ b/deploy/cpp_infer/tools/config.txt @@ -9,7 +9,7 @@ use_mkldnn 0 max_side_len 960 det_db_thresh 0.3 det_db_box_thresh 0.5 -det_db_unclip_ratio 2.0 +det_db_unclip_ratio 1.6 det_model_dir ./inference/ch_ppocr_mobile_v2.0_det_infer/ # cls config From e0851f2b3f569faa97bb8b63ef3fcc483646b424 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Fri, 19 Feb 2021 11:48:38 +0800 Subject: [PATCH 69/77] add prune demo --- deploy/slim/prune/README.md | 64 +++++++++ deploy/slim/prune/README_en.md | 71 ++++++++++ deploy/slim/prune/export_prune_model.py | 132 ++++++++++++++++++ deploy/slim/prune/sensitivity_anal.py | 171 ++++++++++++++++++++++++ 4 files changed, 438 insertions(+) create mode 100644 deploy/slim/prune/README.md create mode 100644 deploy/slim/prune/README_en.md create mode 100644 deploy/slim/prune/export_prune_model.py create mode 100644 deploy/slim/prune/sensitivity_anal.py diff --git a/deploy/slim/prune/README.md b/deploy/slim/prune/README.md new file mode 100644 index 00000000..d9675c5a --- /dev/null +++ b/deploy/slim/prune/README.md @@ -0,0 +1,64 @@ + +## 介绍 + 
+复杂的模型有利于提高模型的性能,但也导致模型中存在一定冗余,模型裁剪通过移除网络模型中的子模型来减少这种冗余,达到减少模型计算复杂度,提高模型推理性能的目的。
+本教程将介绍如何使用飞桨模型压缩库PaddleSlim做PaddleOCR模型的压缩。
+[PaddleSlim](https://github.com/PaddlePaddle/PaddleSlim)集成了模型剪枝、量化(包括量化训练和离线量化)、蒸馏和神经网络搜索等多种业界常用且领先的模型压缩功能,如果您感兴趣,可以关注并了解。
+
+
+在开始本教程之前,建议先了解:
+1. [PaddleOCR模型的训练方法](../../../doc/doc_ch/quickstart.md)
+2. [模型裁剪教程](https://github.com/PaddlePaddle/PaddleSlim/blob/release%2F2.0.0/docs/zh_cn/tutorials/pruning/dygraph/filter_pruning.md)
+
+
+## 快速开始
+
+模型裁剪主要包括四个步骤:
+1. 安装 PaddleSlim
+2. 准备训练好的模型
+3. 敏感度分析、裁剪训练
+4. 导出模型、预测部署
+
+### 1. 安装PaddleSlim
+
+```bash
+git clone https://github.com/PaddlePaddle/PaddleSlim.git
+cd PaddleSlim
+git checkout develop
+python3 setup.py install
+```
+
+### 2. 获取预训练模型
+模型裁剪需要加载事先训练好的模型,PaddleOCR也提供了一系列[模型](../../../doc/doc_ch/models_list.md),开发者可根据需要自行选择模型或使用自己的模型。
+
+### 3. 敏感度分析训练
+
+加载预训练模型后,通过对现有模型的每个网络层进行敏感度分析,得到敏感度文件:sen.pickle,可以通过PaddleSlim提供的[接口](https://github.com/PaddlePaddle/PaddleSlim/blob/9b01b195f0c4bc34a1ab434751cb260e13d64d9e/paddleslim/dygraph/prune/filter_pruner.py#L75)加载文件,获得各网络层在不同裁剪比例下的精度损失,从而了解各网络层冗余度,决定每个网络层的裁剪比例。
+敏感度文件内容格式:
+    sen.pickle(Dict){
+            'layer_weight_name_0': sens_of_each_ratio(Dict){'pruning_ratio_0': acc_loss, 'pruning_ratio_1': acc_loss}
+            'layer_weight_name_1': sens_of_each_ratio(Dict){'pruning_ratio_0': acc_loss, 'pruning_ratio_1': acc_loss}
+        }
+
+    例子:
+        {
+            'conv10_expand_weights': {0.1: 0.006509952684312718, 0.2: 0.01827734339798862, 0.3: 0.014528405644659832, 0.6: 0.06536008804270439, 0.8: 0.11798612250664964, 0.7: 0.12391408417493704, 0.4: 0.030615754498018757, 0.5: 0.047105205602406594}
+            'conv10_linear_weights': {0.1: 0.05113190831455035, 0.2: 0.07705573833558801, 0.3: 0.12096721757739311, 0.6: 0.5135061352930738, 0.8: 0.7908166677143281, 0.7: 0.7272187676899062, 0.4: 0.1819252083008504, 0.5: 0.3728054727792405}
+        }
+加载敏感度文件后会返回一个字典,字典中的keys为网络模型参数的名字,values为一个字典,里面保存了相应网络层的裁剪敏感度信息。例如在例子中,conv10_expand_weights所对应的网络层在裁掉10%的卷积核后模型性能相较原模型会下降0.65%,详细信息可见[PaddleSlim](https://github.com/PaddlePaddle/PaddleSlim/blob/develop/docs/zh_cn/algo/algo.md#2-%E5%8D%B7%E7%A7%AF%E6%A0%B8%E5%89%AA%E8%A3%81%E5%8E%9F%E7%90%86)
+
+进入PaddleOCR根目录,通过以下命令对模型进行敏感度分析训练:
+```bash
+python3.7 deploy/slim/prune/sensitivity_anal.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o Global.pretrain_weights="your trained model"
+```
+
+### 4. 导出模型、预测部署
+
+在得到裁剪训练保存的模型后,我们可以将其导出为inference_model:
+```bash
+python3.7 deploy/slim/prune/export_prune_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o Global.pretrain_weights=./output/det_db/best_accuracy Global.save_inference_dir=inference_model
+```
+
+inference model的预测和部署参考:
+1. [inference model python端预测](../../../doc/doc_ch/inference.md)
+2. [inference model C++预测](../../cpp_infer/readme.md)
diff --git a/deploy/slim/prune/README_en.md b/deploy/slim/prune/README_en.md
new file mode 100644
index 00000000..70cfd580
--- /dev/null
+++ b/deploy/slim/prune/README_en.md
@@ -0,0 +1,71 @@
+
+## Introduction
+
+Generally, a more complex model would achieve better performance in the task, but it also leads to some redundancy in the model. Model pruning is a technique that reduces this redundancy by removing sub-models from the neural network, so as to reduce model calculation complexity and improve model inference performance.
+
+This example uses the [APIs of Pruning](https://paddlepaddle.github.io/PaddleSlim/api/prune_api/) provided by PaddleSlim to compress the OCR model.
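+
+The snippet below is a minimal, self-contained sketch of this idea. The tiny CNN is only a stand-in for the OCR backbone (an assumption for illustration); `FPGMFilterPruner`, `prune_vars` and `paddle.flops` are the same utilities used by the scripts in this directory:
+
+```python
+import paddle
+import paddle.nn as nn
+from paddleslim.dygraph import FPGMFilterPruner
+
+# toy network standing in for the detection backbone
+model = nn.Sequential(
+    nn.Conv2D(3, 16, 3, padding=1), nn.ReLU(),
+    nn.Conv2D(16, 32, 3, padding=1), nn.ReLU(),
+    nn.Conv2D(32, 64, 3, padding=1))
+print("FLOPs before pruning:", paddle.flops(model, [1, 3, 640, 640]))
+
+pruner = FPGMFilterPruner(model, [1, 3, 640, 640])
+# uniform 50% ratio on every conv except the last one, purely for illustration;
+# the sensitivity analysis described below picks per-layer ratios instead
+conv_weights = [p.name for p in model.parameters() if len(p.shape) == 4]
+plan = pruner.prune_vars({name: 0.5 for name in conv_weights[:-1]}, [0])
+print("FLOPs after pruning:", paddle.flops(model, [1, 3, 640, 640]))
+```
+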
+[PaddleSlim](https://github.com/PaddlePaddle/PaddleSlim) is an open source library which integrates model pruning, quantization (including quantization training and offline quantization), distillation, neural network architecture search, and many other commonly used and leading model compression techniques in the industry.
+
+It is recommended that you understand the following pages before reading this example:
+1. [PaddleOCR training methods](../../../doc/doc_ch/quickstart.md)
+2. [The pruning demo](https://github.com/PaddlePaddle/PaddleSlim/blob/release%2F2.0.0/docs/zh_cn/tutorials/pruning/dygraph/filter_pruning.md)
+
+## Quick start
+
+Four steps for OCR model pruning:
+1. Install PaddleSlim
+2. Prepare the trained model
+3. Sensitivity analysis and pruning training
+4. Export the model and deploy it for inference
+
+### 1. Install PaddleSlim
+
+```bash
+git clone https://github.com/PaddlePaddle/PaddleSlim.git
+cd PaddleSlim
+git checkout develop
+python3 setup.py install
+```
+
+
+### 2. Download the Pretrained Model
+Model pruning needs to load a pre-trained model.
+PaddleOCR also provides a series of [models](../../../doc/doc_en/models_list_en.md). Developers can choose one of the provided models or use their own trained model according to their needs.
+
+
+### 3. Pruning sensitivity analysis
+
+  After the pre-trained model is loaded, sensitivity analysis is performed on each network layer of the model to understand its redundancy, and the result is saved to a sensitivity file named sen.pickle. After that, the user can load the sensitivity file via the [methods provided by PaddleSlim](https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/prune/sensitive.py#L221) and determine the pruning ratio of each network layer automatically. For specific details of sensitivity analysis, see: [Sensitivity analysis](https://github.com/PaddlePaddle/PaddleSlim/blob/develop/docs/zh_cn/tutorials/image_classification_sensitivity_analysis_tutorial.md)
+  The data format of the sensitivity file:
+    sen.pickle(Dict){
+            'layer_weight_name_0': sens_of_each_ratio(Dict){'pruning_ratio_0': acc_loss, 'pruning_ratio_1': acc_loss}
+            'layer_weight_name_1': sens_of_each_ratio(Dict){'pruning_ratio_0': acc_loss, 'pruning_ratio_1': acc_loss}
+        }
+
+    example:
+        {
+            'conv10_expand_weights': {0.1: 0.006509952684312718, 0.2: 0.01827734339798862, 0.3: 0.014528405644659832, 0.6: 0.06536008804270439, 0.8: 0.11798612250664964, 0.7: 0.12391408417493704, 0.4: 0.030615754498018757, 0.5: 0.047105205602406594}
+            'conv10_linear_weights': {0.1: 0.05113190831455035, 0.2: 0.07705573833558801, 0.3: 0.12096721757739311, 0.6: 0.5135061352930738, 0.8: 0.7908166677143281, 0.7: 0.7272187676899062, 0.4: 0.1819252083008504, 0.5: 0.3728054727792405}
+        }
+  The function returns a dict after loading the sensitivity file. The keys of the dict are the names of the parameters in each layer, and the value of each key is the pruning sensitivity information of the corresponding layer. In the example, pruning 10% of the filters of the layer corresponding to conv10_expand_weights would lead to a 0.65% degradation of model performance.
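+
+A short sketch of consuming this file, assuming it is a plain pickled dict with exactly the layout shown above (the scripts in this directory use `pruner._get_ratios_by_loss(..., loss=0.02)` for the same purpose):
+
+```python
+import pickle
+
+# assumption: ./sen.pickle was produced by deploy/slim/prune/sensitivity_anal.py
+with open("sen.pickle", "rb") as f:
+    sen = pickle.load(f)
+
+loss_budget = 0.02  # tolerate at most ~2% metric drop per layer
+ratios = {}
+for name, loss_per_ratio in sen.items():
+    # keep the largest tested ratio whose accuracy loss stays within the budget
+    ok = [ratio for ratio, acc_loss in loss_per_ratio.items() if acc_loss <= loss_budget]
+    ratios[name] = max(ok) if ok else 0.0
+
+# with the example values above this yields
+# {'conv10_expand_weights': 0.3, 'conv10_linear_weights': 0.0}
+print(ratios)
+```
+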
+The details could be seen at: [Sensitivity analysis](https://github.com/PaddlePaddle/PaddleSlim/blob/develop/docs/zh_cn/algo/algo.md#2-%E5%8D%B7%E7%A7%AF%E6%A0%B8%E5%89%AA%E8%A3%81%E5%8E%9F%E7%90%86)
+
+
+Enter the PaddleOCR root directory and perform sensitivity analysis on the model with the following command:
+
+```bash
+python3.7 deploy/slim/prune/sensitivity_anal.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o Global.pretrain_weights="your trained model"
+```
+
+
+### 4. Export inference model and deploy it
+
+We can export the pruned model as inference_model for deployment:
+```bash
+python deploy/slim/prune/export_prune_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o Global.pretrain_weights=./output/det_db/best_accuracy Global.test_batch_size_per_card=1 Global.save_inference_dir=inference_model
+```
+
+References for prediction and deployment of the inference model:
+1. [inference model python prediction](../../../doc/doc_en/inference_en.md)
+2. [inference model C++ prediction](../../cpp_infer/readme_en.md)
diff --git a/deploy/slim/prune/export_prune_model.py b/deploy/slim/prune/export_prune_model.py
new file mode 100644
index 00000000..541273c8
--- /dev/null
+++ b/deploy/slim/prune/export_prune_model.py
@@ -0,0 +1,132 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
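+
+# Overall flow of this script:
+#   1. build the Eval dataloader, the post-process and the model from the given config
+#   2. load the per-layer sensitivities from ./sen.pickle through FPGMFilterPruner.sensitive()
+#      and turn them into pruning ratios with _get_ratios_by_loss(loss=0.02)
+#   3. prune the conv filters with prune_vars(), load the fine-tuned weights and re-evaluate
+#   4. convert the pruned dygraph model with paddle.jit.to_static and save the inference model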
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import sys + +__dir__ = os.path.dirname(__file__) +sys.path.append(__dir__) +sys.path.append(os.path.join(__dir__, '..', '..', '..')) +sys.path.append(os.path.join(__dir__, '..', '..', '..', 'tools')) + +import paddle +import paddle.distributed as dist +from ppocr.data import build_dataloader +from ppocr.modeling.architectures import build_model +from ppocr.losses import build_loss +from ppocr.optimizer import build_optimizer +from ppocr.postprocess import build_post_process +from ppocr.metrics import build_metric +from ppocr.utils.save_load import init_model +import tools.program as program + +dist.get_world_size() + + +def main(config, device, logger, vdl_writer): + # init dist environment + if config['Global']['distributed']: + dist.init_parallel_env() + + global_config = config['Global'] + + # build dataloader + valid_dataloader = build_dataloader(config, 'Eval', device, logger) + + # build post process + post_process_class = build_post_process(config['PostProcess'], + global_config) + + # build model + # for rec algorithm + if hasattr(post_process_class, 'character'): + char_num = len(getattr(post_process_class, 'character')) + config['Architecture']["Head"]['out_channels'] = char_num + model = build_model(config['Architecture']) + + flops = paddle.flops(model, [1, 3, 640, 640]) + logger.info(f"FLOPs before pruning: {flops}") + + from paddleslim.dygraph import FPGMFilterPruner + model.train() + pruner = FPGMFilterPruner(model, [1, 3, 640, 640]) + + # build metric + eval_class = build_metric(config['Metric']) + + def eval_fn(): + metric = program.eval(model, valid_dataloader, post_process_class, + eval_class) + logger.info(f"metric['hmean']: {metric['hmean']}") + return metric['hmean'] + + params_sensitive = pruner.sensitive( + eval_func=eval_fn, + sen_file="./sen.pickle", + skip_vars=[ + "conv2d_57.w_0", "conv2d_transpose_2.w_0", "conv2d_transpose_3.w_0" + ]) + + logger.info( + "The sensitivity analysis results of model parameters saved in sen.pickle" + ) + # calculate pruned params's ratio + params_sensitive = pruner._get_ratios_by_loss(params_sensitive, loss=0.02) + for key in params_sensitive.keys(): + logger.info(f"{key}, {params_sensitive[key]}") + + plan = pruner.prune_vars(params_sensitive, [0]) + + flops = paddle.flops(model, [1, 3, 640, 640]) + logger.info(f"FLOPs after pruning: {flops}") + + # load pretrain model + pre_best_model_dict = init_model(config, model, logger, None) + metric = program.eval(model, valid_dataloader, post_process_class, + eval_class) + logger.info(f"metric['hmean']: {metric['hmean']}") + + # start export model + from paddle.jit import to_static + + infer_shape = [3, -1, -1] + if config['Architecture']['model_type'] == "rec": + infer_shape = [3, 32, -1] # for rec model, H must be 32 + + if 'Transform' in config['Architecture'] and config['Architecture'][ + 'Transform'] is not None and config['Architecture'][ + 'Transform']['name'] == 'TPS': + logger.info( + 'When there is tps in the network, variable length input is not supported, and the input size needs to be the same as during training' + ) + infer_shape[-1] = 100 + model = to_static( + model, + input_spec=[ + paddle.static.InputSpec( + shape=[None] + infer_shape, dtype='float32') + ]) + + save_path = '{}/inference'.format(config['Global']['save_inference_dir']) + paddle.jit.save(model, save_path) + logger.info('inference model is saved to {}'.format(save_path)) + + +if 
__name__ == '__main__': + config, device, logger, vdl_writer = program.preprocess(is_train=True) + main(config, device, logger, vdl_writer) diff --git a/deploy/slim/prune/sensitivity_anal.py b/deploy/slim/prune/sensitivity_anal.py new file mode 100644 index 00000000..6abd9815 --- /dev/null +++ b/deploy/slim/prune/sensitivity_anal.py @@ -0,0 +1,171 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import sys + +__dir__ = os.path.dirname(__file__) +sys.path.append(__dir__) +sys.path.append(os.path.join(__dir__, '..', '..', '..')) +sys.path.append(os.path.join(__dir__, '..', '..', '..', 'tools')) + +import paddle +import paddle.distributed as dist +from ppocr.data import build_dataloader +from ppocr.modeling.architectures import build_model +from ppocr.losses import build_loss +from ppocr.optimizer import build_optimizer +from ppocr.postprocess import build_post_process +from ppocr.metrics import build_metric +from ppocr.utils.save_load import init_model +import tools.program as program + +dist.get_world_size() + + +def get_pruned_params(parameters): + params = [] + + for param in parameters: + if len( + param.shape + ) == 4 and 'depthwise' not in param.name and 'transpose' not in param.name and "conv2d_57" not in param.name and "conv2d_56" not in param.name: + params.append(param.name) + return params + + +def main(config, device, logger, vdl_writer): + # init dist environment + if config['Global']['distributed']: + dist.init_parallel_env() + + global_config = config['Global'] + + # build dataloader + train_dataloader = build_dataloader(config, 'Train', device, logger) + if config['Eval']: + valid_dataloader = build_dataloader(config, 'Eval', device, logger) + else: + valid_dataloader = None + + # build post process + post_process_class = build_post_process(config['PostProcess'], + global_config) + + # build model + # for rec algorithm + if hasattr(post_process_class, 'character'): + char_num = len(getattr(post_process_class, 'character')) + config['Architecture']["Head"]['out_channels'] = char_num + model = build_model(config['Architecture']) + + flops = paddle.flops(model, [1, 3, 640, 640]) + logger.info(f"FLOPs before pruning: {flops}") + + from paddleslim.dygraph import FPGMFilterPruner + model.train() + pruner = FPGMFilterPruner(model, [1, 3, 640, 640]) + + # build loss + loss_class = build_loss(config['Loss']) + + # build optim + optimizer, lr_scheduler = build_optimizer( + config['Optimizer'], + epochs=config['Global']['epoch_num'], + step_each_epoch=len(train_dataloader), + parameters=model.parameters()) + + # build metric + eval_class = build_metric(config['Metric']) + # load pretrain model + pre_best_model_dict = init_model(config, model, logger, optimizer) + + logger.info('train dataloader has {} iters, valid dataloader has {} iters'. 
+ format(len(train_dataloader), len(valid_dataloader))) + # build metric + eval_class = build_metric(config['Metric']) + + logger.info('train dataloader has {} iters, valid dataloader has {} iters'. + format(len(train_dataloader), len(valid_dataloader))) + + def eval_fn(): + metric = program.eval(model, valid_dataloader, post_process_class, + eval_class) + logger.info(f"metric['hmean']: {metric['hmean']}") + return metric['hmean'] + + params_sensitive = pruner.sensitive( + eval_func=eval_fn, + sen_file="./sen.pickle", + skip_vars=[ + "conv2d_57.w_0", "conv2d_transpose_2.w_0", "conv2d_transpose_3.w_0" + ]) + + logger.info( + "The sensitivity analysis results of model parameters saved in sen.pickle" + ) + # calculate pruned params's ratio + params_sensitive = pruner._get_ratios_by_loss(params_sensitive, loss=0.02) + for key in params_sensitive.keys(): + logger.info(f"{key}, {params_sensitive[key]}") + + plan = pruner.prune_vars(params_sensitive, [0]) + for param in model.parameters(): + if ("weights" in param.name and "conv" in param.name) or ( + "w_0" in param.name and "conv2d" in param.name): + logger.info(f"{param.name}: {param.shape}") + + flops = paddle.flops(model, [1, 3, 640, 640]) + logger.info(f"FLOPs after pruning: {flops}") + + # start train + + program.train(config, train_dataloader, valid_dataloader, device, model, + loss_class, optimizer, lr_scheduler, post_process_class, + eval_class, pre_best_model_dict, logger, vdl_writer) + mode = 'infer' + if mode == 'infer': + from paddle.jit import to_static + + infer_shape = [3, -1, -1] + if config['Architecture']['model_type'] == "rec": + infer_shape = [3, 32, -1] # for rec model, H must be 32 + if 'Transform' in config['Architecture'] and config['Architecture'][ + 'Transform'] is not None and config['Architecture'][ + 'Transform']['name'] == 'TPS': + logger.info( + 'When there is tps in the network, variable length input is not supported, and the input size needs to be the same as during training' + ) + infer_shape[-1] = 100 + model = to_static( + model, + input_spec=[ + paddle.static.InputSpec( + shape=[None] + infer_shape, dtype='float32') + ]) + + save_path = '{}/inference'.format(config['Global'][ + 'save_inference_dir']) + paddle.jit.save(model, save_path) + logger.info('inference model is saved to {}'.format(save_path)) + + +if __name__ == '__main__': + config, device, logger, vdl_writer = program.preprocess(is_train=True) + main(config, device, logger, vdl_writer) From 6bafa1cec4116813d565746e089abe4cea12bf3f Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Fri, 19 Feb 2021 12:39:43 +0800 Subject: [PATCH 70/77] add prune demo --- deploy/slim/prune/sensitivity_anal.py | 25 ------------------------- 1 file changed, 25 deletions(-) diff --git a/deploy/slim/prune/sensitivity_anal.py b/deploy/slim/prune/sensitivity_anal.py index 6abd9815..bd2b9649 100644 --- a/deploy/slim/prune/sensitivity_anal.py +++ b/deploy/slim/prune/sensitivity_anal.py @@ -139,31 +139,6 @@ def main(config, device, logger, vdl_writer): program.train(config, train_dataloader, valid_dataloader, device, model, loss_class, optimizer, lr_scheduler, post_process_class, eval_class, pre_best_model_dict, logger, vdl_writer) - mode = 'infer' - if mode == 'infer': - from paddle.jit import to_static - - infer_shape = [3, -1, -1] - if config['Architecture']['model_type'] == "rec": - infer_shape = [3, 32, -1] # for rec model, H must be 32 - if 'Transform' in config['Architecture'] and config['Architecture'][ - 'Transform'] is not None and config['Architecture'][ - 
'Transform']['name'] == 'TPS': - logger.info( - 'When there is tps in the network, variable length input is not supported, and the input size needs to be the same as during training' - ) - infer_shape[-1] = 100 - model = to_static( - model, - input_spec=[ - paddle.static.InputSpec( - shape=[None] + infer_shape, dtype='float32') - ]) - - save_path = '{}/inference'.format(config['Global'][ - 'save_inference_dir']) - paddle.jit.save(model, save_path) - logger.info('inference model is saved to {}'.format(save_path)) if __name__ == '__main__': From 6e07bf08a2d5bddcba7bf5e653ffafb7fed1f765 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Fri, 19 Feb 2021 13:21:28 +0800 Subject: [PATCH 71/77] delete dist when export model --- deploy/slim/prune/export_prune_model.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/deploy/slim/prune/export_prune_model.py b/deploy/slim/prune/export_prune_model.py index 541273c8..29f7d211 100644 --- a/deploy/slim/prune/export_prune_model.py +++ b/deploy/slim/prune/export_prune_model.py @@ -25,23 +25,16 @@ sys.path.append(os.path.join(__dir__, '..', '..', '..')) sys.path.append(os.path.join(__dir__, '..', '..', '..', 'tools')) import paddle -import paddle.distributed as dist from ppocr.data import build_dataloader from ppocr.modeling.architectures import build_model -from ppocr.losses import build_loss -from ppocr.optimizer import build_optimizer + from ppocr.postprocess import build_post_process from ppocr.metrics import build_metric from ppocr.utils.save_load import init_model import tools.program as program -dist.get_world_size() - def main(config, device, logger, vdl_writer): - # init dist environment - if config['Global']['distributed']: - dist.init_parallel_env() global_config = config['Global'] From 0507402a0f8522d582263c48fdde4630e235e24d Mon Sep 17 00:00:00 2001 From: WenmuZhou Date: Fri, 19 Feb 2021 16:29:54 +0800 Subject: [PATCH 72/77] fix export bug --- ppocr/modeling/heads/rec_att_head.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ppocr/modeling/heads/rec_att_head.py b/ppocr/modeling/heads/rec_att_head.py index 59e26c1e..0d222714 100644 --- a/ppocr/modeling/heads/rec_att_head.py +++ b/ppocr/modeling/heads/rec_att_head.py @@ -57,6 +57,9 @@ class AttentionHead(nn.Layer): else: targets = paddle.zeros(shape=[batch_size], dtype="int32") probs = None + char_onehots = None + outputs = None + alpha = None for i in range(num_steps): char_onehots = self._char_to_onehot( @@ -146,9 +149,6 @@ class AttentionLSTM(nn.Layer): else: targets = paddle.zeros(shape=[batch_size], dtype="int32") probs = None - char_onehots = None - outputs = None - alpha = None for i in range(num_steps): char_onehots = self._char_to_onehot( From 9da00464a0462fbf89957c76f5ed9751fd62e276 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Fri, 19 Feb 2021 19:29:30 +0800 Subject: [PATCH 73/77] add use_dilation params in hubserving --- deploy/hubserving/ocr_det/params.py | 3 ++- deploy/hubserving/ocr_system/params.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/deploy/hubserving/ocr_det/params.py b/deploy/hubserving/ocr_det/params.py index 13215890..7be88e9b 100755 --- a/deploy/hubserving/ocr_det/params.py +++ b/deploy/hubserving/ocr_det/params.py @@ -20,7 +20,8 @@ def read_params(): #DB parmas cfg.det_db_thresh = 0.3 cfg.det_db_box_thresh = 0.5 - cfg.det_db_unclip_ratio = 2.0 + cfg.det_db_unclip_ratio = 1.6 + cfg.use_dilation = False # #EAST parmas # cfg.det_east_score_thresh = 0.8 diff --git 
a/deploy/hubserving/ocr_system/params.py b/deploy/hubserving/ocr_system/params.py index add46666..bd56dc2e 100755 --- a/deploy/hubserving/ocr_system/params.py +++ b/deploy/hubserving/ocr_system/params.py @@ -20,7 +20,8 @@ def read_params(): #DB parmas cfg.det_db_thresh = 0.3 cfg.det_db_box_thresh = 0.5 - cfg.det_db_unclip_ratio = 2.0 + cfg.det_db_unclip_ratio = 1.6 + cfg.use_dilation = False #EAST parmas cfg.det_east_score_thresh = 0.8 From fa4a9962bd2bec09c55d676b15594b0a28c6e611 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Sat, 20 Feb 2021 09:53:12 +0800 Subject: [PATCH 74/77] fix dilation --- paddleocr.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/paddleocr.py b/paddleocr.py index db24aa59..7c126261 100644 --- a/paddleocr.py +++ b/paddleocr.py @@ -146,7 +146,8 @@ def parse_args(mMain=True, add_help=True): # DB parmas parser.add_argument("--det_db_thresh", type=float, default=0.3) parser.add_argument("--det_db_box_thresh", type=float, default=0.5) - parser.add_argument("--det_db_unclip_ratio", type=float, default=2.0) + parser.add_argument("--det_db_unclip_ratio", type=float, default=1.6) + parser.add_argument("--use_dilation", type=bool, default=False) # EAST parmas parser.add_argument("--det_east_score_thresh", type=float, default=0.8) @@ -193,7 +194,8 @@ def parse_args(mMain=True, add_help=True): det_limit_type='max', det_db_thresh=0.3, det_db_box_thresh=0.5, - det_db_unclip_ratio=2.0, + det_db_unclip_ratio=1.6, + use_dilation=False, det_east_score_thresh=0.8, det_east_cover_thresh=0.1, det_east_nms_thresh=0.2, From 8f9dda8608d3e7af24f141b7ed76a030e21a3812 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Mon, 22 Feb 2021 09:53:43 +0800 Subject: [PATCH 75/77] fix issue 2013 --- ppocr/data/imaug/make_shrink_map.py | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/ppocr/data/imaug/make_shrink_map.py b/ppocr/data/imaug/make_shrink_map.py index 8e612235..03385e3e 100644 --- a/ppocr/data/imaug/make_shrink_map.py +++ b/ppocr/data/imaug/make_shrink_map.py @@ -44,21 +44,34 @@ class MakeShrinkMap(object): ignore_tags[i] = True else: polygon_shape = Polygon(polygon) - distance = polygon_shape.area * ( - 1 - np.power(self.shrink_ratio, 2)) / polygon_shape.length - subject = [tuple(l) for l in text_polys[i]] + subject = [tuple(l) for l in polygon] padding = pyclipper.PyclipperOffset() padding.AddPath(subject, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON) - shrinked = padding.Execute(-distance) + shrinked = [] + + # Increase the shrink ratio every time we get multiple polygon returned back + possible_ratios = np.arange(self.shrink_ratio, 1, + self.shrink_ratio) + np.append(possible_ratios, 1) + # print(possible_ratios) + for ratio in possible_ratios: + # print(f"Change shrink ratio to {ratio}") + distance = polygon_shape.area * ( + 1 - np.power(ratio, 2)) / polygon_shape.length + shrinked = padding.Execute(-distance) + if len(shrinked) == 1: + break + if shrinked == []: cv2.fillPoly(mask, polygon.astype(np.int32)[np.newaxis, :, :], 0) ignore_tags[i] = True continue - shrinked = np.array(shrinked[0]).reshape(-1, 2) - cv2.fillPoly(gt, [shrinked.astype(np.int32)], 1) - # cv2.fillPoly(gt[0], [shrinked.astype(np.int32)], 1) + + for each_shirnk in shrinked: + shirnk = np.array(each_shirnk).reshape(-1, 2) + cv2.fillPoly(gt, [shirnk.astype(np.int32)], 1) data['shrink_map'] = gt data['shrink_mask'] = mask From d8719969ba31011f12955991b5b240c2d8189cb1 Mon Sep 17 00:00:00 2001 From: littletomatodonkey <2120160898@bit.edu.cn> Date: Mon, 
22 Feb 2021 11:14:13 +0800 Subject: [PATCH 76/77] improve style text infer process (#2055) * improve style text * fix dead loop --- StyleText/engine/predictors.py | 25 +++++++++++++- StyleText/engine/synthesisers.py | 20 +++++++---- StyleText/engine/text_drawers.py | 59 +++++++++++++++++++++++--------- 3 files changed, 80 insertions(+), 24 deletions(-) diff --git a/StyleText/engine/predictors.py b/StyleText/engine/predictors.py index a1ba21f1..ca9ab9ce 100644 --- a/StyleText/engine/predictors.py +++ b/StyleText/engine/predictors.py @@ -38,7 +38,15 @@ class StyleTextRecPredictor(object): self.std = config["Predictor"]["std"] self.expand_result = config["Predictor"]["expand_result"] - def predict(self, style_input, text_input): + def reshape_to_same_height(self, img_list): + h = img_list[0].shape[0] + for idx in range(1, len(img_list)): + new_w = round(1.0 * img_list[idx].shape[1] / + img_list[idx].shape[0] * h) + img_list[idx] = cv2.resize(img_list[idx], (new_w, h)) + return img_list + + def predict_single_image(self, style_input, text_input): style_input = self.rep_style_input(style_input, text_input) tensor_style_input = self.preprocess(style_input) tensor_text_input = self.preprocess(text_input) @@ -64,6 +72,21 @@ class StyleTextRecPredictor(object): "fake_bg": fake_bg, } + def predict(self, style_input, text_input_list): + if not isinstance(text_input_list, (tuple, list)): + return self.predict_single_image(style_input, text_input_list) + + synth_result_list = [] + for text_input in text_input_list: + synth_result = self.predict_single_image(style_input, text_input) + synth_result_list.append(synth_result) + + for key in synth_result: + res = [r[key] for r in synth_result_list] + res = self.reshape_to_same_height(res) + synth_result[key] = np.concatenate(res, axis=1) + return synth_result + def preprocess(self, img): img = (img.astype('float32') * self.scale - self.mean) / self.std img_height, img_width, channel = img.shape diff --git a/StyleText/engine/synthesisers.py b/StyleText/engine/synthesisers.py index 177e3e04..6461d9e3 100644 --- a/StyleText/engine/synthesisers.py +++ b/StyleText/engine/synthesisers.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
 import os
+import numpy as np
+import cv2
 
 from utils.config import ArgsParser, load_config, override_config
 from utils.logging import get_logger
@@ -36,8 +38,9 @@ class ImageSynthesiser(object):
         self.predictor = getattr(predictors, predictor_method)(self.config)
 
     def synth_image(self, corpus, style_input, language="en"):
-        corpus, text_input = self.text_drawer.draw_text(corpus, language)
-        synth_result = self.predictor.predict(style_input, text_input)
+        corpus_list, text_input_list = self.text_drawer.draw_text(
+            corpus, language, style_input_width=style_input.shape[1])
+        synth_result = self.predictor.predict(style_input, text_input_list)
         return synth_result
 
 
@@ -59,12 +62,15 @@ class DatasetSynthesiser(ImageSynthesiser):
         for i in range(self.output_num):
             style_data = self.style_sampler.sample()
             style_input = style_data["image"]
-            corpus_language, text_input_label = self.corpus_generator.generate(
-            )
-            text_input_label, text_input = self.text_drawer.draw_text(
-                text_input_label, corpus_language)
+            corpus_language, text_input_label = self.corpus_generator.generate()
+            text_input_label_list, text_input_list = self.text_drawer.draw_text(
+                text_input_label,
+                corpus_language,
+                style_input_width=style_input.shape[1])
 
-            synth_result = self.predictor.predict(style_input, text_input)
+            text_input_label = "".join(text_input_label_list)
+
+            synth_result = self.predictor.predict(style_input, text_input_list)
             fake_fusion = synth_result["fake_fusion"]
             self.writer.save_image(fake_fusion, text_input_label)
             self.writer.save_label()
diff --git a/StyleText/engine/text_drawers.py b/StyleText/engine/text_drawers.py
index 8aaac06e..aeec75c3 100644
--- a/StyleText/engine/text_drawers.py
+++ b/StyleText/engine/text_drawers.py
@@ -1,5 +1,6 @@
 from PIL import Image, ImageDraw, ImageFont
 import numpy as np
+import cv2
 
 from utils.logging import get_logger
 
@@ -28,7 +29,11 @@ class StdTextDrawer(object):
         else:
             return int((self.height - 4)**2 / font_height)
 
-    def draw_text(self, corpus, language="en", crop=True):
+    def draw_text(self,
+                  corpus,
+                  language="en",
+                  crop=True,
+                  style_input_width=None):
         if language not in self.support_languages:
             self.logger.warning(
                 "language {} not supported, use en instead.".format(language))
@@ -37,21 +42,43 @@ class StdTextDrawer(object):
             width = min(self.max_width, len(corpus) * self.height) + 4
         else:
             width = len(corpus) * self.height + 4
-        bg = Image.new("RGB", (width, self.height), color=(127, 127, 127))
-        draw = ImageDraw.Draw(bg)
-        char_x = 2
-        font = self.font_dict[language]
-        for i, char_i in enumerate(corpus):
-            char_size = font.getsize(char_i)[0]
-            draw.text((char_x, 2), char_i, fill=(0, 0, 0), font=font)
-            char_x += char_size
-            if char_x >= width:
-                corpus = corpus[0:i + 1]
-                self.logger.warning("corpus length exceed limit: {}".format(
-                    corpus))
+        if style_input_width is not None:
+            width = min(width, style_input_width)
+
+        corpus_list = []
+        text_input_list = []
+
+        while len(corpus) != 0:
+            bg = Image.new("RGB", (width, self.height), color=(127, 127, 127))
+            draw = ImageDraw.Draw(bg)
+            char_x = 2
+            font = self.font_dict[language]
+            i = 0
+            while i < len(corpus):
+                char_i = corpus[i]
+                char_size = font.getsize(char_i)[0]
+                # split when char_x + char_size would exceed the image width and i != 0 (at least 1 char should be written on the image)
+                if char_x + char_size >= width and i != 0:
+                    text_input = np.array(bg).astype(np.uint8)
+                    text_input = text_input[:, 0:char_x, :]
+
+                    corpus_list.append(corpus[0:i])
+                    text_input_list.append(text_input)
+                    corpus = corpus[i:]
+                    break
+                draw.text((char_x, 2), char_i, fill=(0, 0, 0), font=font)
+                char_x += char_size
+
+                i += 1
+            # the whole remaining text fits within the style input width
+            if i == len(corpus):
+                text_input = np.array(bg).astype(np.uint8)
+                text_input = text_input[:, 0:char_x, :]
+
+                corpus_list.append(corpus[0:i])
+                text_input_list.append(text_input)
+                corpus = corpus[i:]
                 break
-        text_input = np.array(bg).astype(np.uint8)
-        text_input = text_input[:, 0:char_x, :]
-        return corpus, text_input
+        return corpus_list, text_input_list

From 3ce97f18ec83c3a3cfaefae8a2b01696d628f84d Mon Sep 17 00:00:00 2001
From: littletomatodonkey <2120160898@bit.edu.cn>
Date: Mon, 22 Feb 2021 15:12:32 +0800
Subject: [PATCH 77/77] fix predict rec (#2065)

* fix predict rec

* fix loop
---
 tools/infer/predict_rec.py | 41 +++++++++++++++++++++++---------------
 1 file changed, 25 insertions(+), 16 deletions(-)

diff --git a/tools/infer/predict_rec.py b/tools/infer/predict_rec.py
index b3d9d490..b24e57dd 100755
--- a/tools/infer/predict_rec.py
+++ b/tools/infer/predict_rec.py
@@ -248,9 +248,11 @@ class TextRecognizer(object):
 def main(args):
     image_file_list = get_image_file_list(args.image_dir)
     text_recognizer = TextRecognizer(args)
+    total_run_time = 0.0
+    total_images_num = 0
     valid_image_file_list = []
     img_list = []
-    for image_file in image_file_list:
+    for idx, image_file in enumerate(image_file_list):
         img, flag = check_and_read_gif(image_file)
         if not flag:
             img = cv2.imread(image_file)
@@ -259,22 +261,29 @@ def main(args):
             continue
         valid_image_file_list.append(image_file)
         img_list.append(img)
-    try:
-        rec_res, predict_time = text_recognizer(img_list)
-    except:
-        logger.info(traceback.format_exc())
-        logger.info(
-            "ERROR!!!! \n"
-            "Please read the FAQ:https://github.com/PaddlePaddle/PaddleOCR#faq \n"
-            "If your model has tps module: "
-            "TPS does not support variable shape.\n"
-            "Please set --rec_image_shape='3,32,100' and --rec_char_type='en' ")
-        exit()
-    for ino in range(len(img_list)):
-        logger.info("Predicts of {}:{}".format(valid_image_file_list[ino],
-                                               rec_res[ino]))
+        if len(img_list) >= args.rec_batch_num or idx == len(
+                image_file_list) - 1:
+            try:
+                rec_res, predict_time = text_recognizer(img_list)
+                total_run_time += predict_time
+            except:
+                logger.info(traceback.format_exc())
+                logger.info(
+                    "ERROR!!!! \n"
+                    "Please read the FAQ:https://github.com/PaddlePaddle/PaddleOCR#faq \n"
+                    "If your model has tps module: "
+                    "TPS does not support variable shape.\n"
+                    "Please set --rec_image_shape='3,32,100' and --rec_char_type='en' "
+                )
+                exit()
+            for ino in range(len(img_list)):
+                logger.info("Predicts of {}:{}".format(valid_image_file_list[
+                    ino], rec_res[ino]))
+            total_images_num += len(valid_image_file_list)
+            valid_image_file_list = []
+            img_list = []
     logger.info("Total predict time for {} images, cost: {:.3f}".format(
-        len(img_list), predict_time))
+        total_images_num, total_run_time))
 
 
 if __name__ == "__main__":
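
The batching change in PATCH 77 can be read independently of PaddleOCR. The sketch below is a minimal, self-contained illustration of the same accumulate-and-flush pattern; `recognize` is a hypothetical stub standing in for `TextRecognizer.__call__`, and `batch_size` plays the role of `--rec_batch_num`. It is not part of the patch series.

```python
def recognize(batch):
    # Hypothetical stand-in for TextRecognizer.__call__: returns one
    # (text, score) pair per image plus a fake elapsed time.
    return [("text_%d" % i, 0.99) for i in range(len(batch))], 0.01


def run_batched(image_list, batch_size=6):
    results, total_time = [], 0.0
    batch = []
    for idx, img in enumerate(image_list):
        batch.append(img)
        # Flush when the batch is full or the last image has been collected,
        # mirroring `len(img_list) >= args.rec_batch_num or idx == len(...) - 1`.
        if len(batch) >= batch_size or idx == len(image_list) - 1:
            rec_res, elapsed = recognize(batch)
            results.extend(rec_res)
            total_time += elapsed
            batch = []
    return results, total_time


if __name__ == "__main__":
    res, t = run_batched(list(range(10)), batch_size=4)
    print(len(res), "results in {:.2f}s".format(t))  # 10 results in 0.03s
```

Flushing on the last index guarantees that a trailing partial batch is still predicted, which is why the `idx == len(image_file_list) - 1` condition appears in the patch alongside the batch-size check.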