# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys

from ppocr.utils.utility import initial_logger

logger = initial_logger()

from paddle.fluid.core import PaddleTensor
from paddle.fluid.core import AnalysisConfig
from paddle.fluid.core import create_paddle_predictor

import cv2
import numpy as np
import json
from PIL import Image, ImageDraw, ImageFont
import math


def parse_args():
    def str2bool(v):
        return v.lower() in ("true", "t", "1")

    parser = argparse.ArgumentParser()
    # params for the prediction engine
    parser.add_argument("--use_gpu", type=str2bool, default=True)
    parser.add_argument("--ir_optim", type=str2bool, default=True)
    parser.add_argument("--use_tensorrt", type=str2bool, default=False)
    parser.add_argument("--gpu_mem", type=int, default=8000)

    # params for the text detector
    parser.add_argument("--image_dir", type=str)
    parser.add_argument("--det_algorithm", type=str, default='DB')
    parser.add_argument("--det_model_dir", type=str)
    parser.add_argument("--det_max_side_len", type=float, default=960)

    # DB params
    parser.add_argument("--det_db_thresh", type=float, default=0.3)
    parser.add_argument("--det_db_box_thresh", type=float, default=0.5)
    parser.add_argument("--det_db_unclip_ratio", type=float, default=2.0)

    # EAST params
    parser.add_argument("--det_east_score_thresh", type=float, default=0.8)
    parser.add_argument("--det_east_cover_thresh", type=float, default=0.1)
    parser.add_argument("--det_east_nms_thresh", type=float, default=0.2)

    # params for the text recognizer
    parser.add_argument("--rec_algorithm", type=str, default='CRNN')
    parser.add_argument("--rec_model_dir", type=str)
    parser.add_argument("--rec_image_shape", type=str, default="3, 32, 320")
    parser.add_argument("--rec_char_type", type=str, default='ch')
    parser.add_argument("--rec_batch_num", type=int, default=30)
    parser.add_argument(
        "--rec_char_dict_path",
        type=str,
        default="./ppocr/utils/ppocr_keys_v1.txt")
    return parser.parse_args()
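
# Example invocation of a script built on these flags (the script name and
# model paths below are illustrative assumptions, not defined in this module):
#   python tools/infer/predict_system.py --image_dir ./doc/imgs \
#       --det_model_dir ./inference/det/ --rec_model_dir ./inference/rec/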


def create_predictor(args, mode):
    if mode == "det":
        model_dir = args.det_model_dir
    else:
        model_dir = args.rec_model_dir

    if model_dir is None:
        logger.info("no model dir is set for {} mode".format(mode))
        sys.exit(0)
    model_file_path = os.path.join(model_dir, "model")
    params_file_path = os.path.join(model_dir, "params")
    if not os.path.exists(model_file_path):
        logger.info("model file path {} does not exist".format(model_file_path))
        sys.exit(0)
    if not os.path.exists(params_file_path):
        logger.info("params file path {} does not exist".format(params_file_path))
        sys.exit(0)

    config = AnalysisConfig(model_file_path, params_file_path)

    if args.use_gpu:
        config.enable_use_gpu(args.gpu_mem, 0)
    else:
        config.disable_gpu()

    config.disable_glog_info()

    # use zero copy: feed/fetch ops are disabled so data is exchanged
    # directly through the input/output tensors below
    config.delete_pass("conv_transpose_eltwiseadd_bn_fuse_pass")
    config.switch_use_feed_fetch_ops(False)
    predictor = create_paddle_predictor(config)
    input_names = predictor.get_input_names()
    input_tensor = predictor.get_input_tensor(input_names[0])
    output_names = predictor.get_output_names()
    output_tensors = []
    for output_name in output_names:
        output_tensor = predictor.get_output_tensor(output_name)
        output_tensors.append(output_tensor)
    return predictor, input_tensor, output_tensors
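
# A minimal sketch of how the returned handles are typically driven with the
# zero-copy API (`img` is an assumed preprocessed NCHW float32 batch, not
# produced by this module):
#
#   predictor, input_tensor, output_tensors = create_predictor(args, mode="det")
#   input_tensor.copy_from_cpu(img)
#   predictor.zero_copy_run()
#   outputs = [t.copy_to_cpu() for t in output_tensors]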


def draw_text_det_res(dt_boxes, img_path):
    src_im = cv2.imread(img_path)
    for box in dt_boxes:
        box = np.array(box).astype(np.int32).reshape(-1, 2)
        cv2.polylines(src_im, [box], True, color=(255, 255, 0), thickness=2)
    return src_im


def resize_img(img, input_size=600):
    """
    Resize an image so that its longest side equals input_size.
    """
    img = np.array(img)
    im_shape = img.shape
    im_size_max = np.max(im_shape[0:2])
    im_scale = float(input_size) / float(im_size_max)
    im = cv2.resize(img, None, None, fx=im_scale, fy=im_scale)
    return im


def draw_ocr(image, boxes, txts, scores, draw_txt=True, drop_score=0.5):
    """
    Visualize the results of OCR detection and recognition.
    args:
        image(Image|array): RGB image
        boxes(list): boxes with shape (N, 4, 2)
        txts(list): the recognized texts
        scores(list): the scores corresponding to txts
        draw_txt(bool): whether to draw the texts next to the image
        drop_score(float): only boxes with scores greater than drop_score
            will be visualized
    return(array):
        the visualized image
    """
    if scores is None:
        scores = [1] * len(boxes)
    for (box, score) in zip(boxes, scores):
        if score < drop_score or math.isnan(score):
            continue
        box = np.reshape(np.array(box), [-1, 1, 2]).astype(np.int64)
        image = cv2.polylines(np.array(image), [box], True, (255, 0, 0), 2)

    if draw_txt:
        img = np.array(resize_img(image, input_size=600))
        txt_img = text_visual(
            txts, scores, img_h=img.shape[0], img_w=600, threshold=drop_score)
        img = np.concatenate([np.array(img), np.array(txt_img)], axis=1)
        return img
    return image


def str_count(s):
    """
    Estimate the visual width of a string in units of Chinese characters:
    a single English letter, digit or space counts as half a Chinese
    character.

    args:
        s(string): the input string
    return(int):
        the estimated width in Chinese-character units
    """
    import string
    count_zh = count_pu = 0
    s_len = len(s)
    en_dg_count = 0
    for c in s:
        if c in string.ascii_letters or c.isdigit() or c.isspace():
            en_dg_count += 1
        elif c.isalpha():
            count_zh += 1
        else:
            count_pu += 1
    return s_len - math.ceil(en_dg_count / 2)
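
# For example (an illustrative string, not from the pipeline):
#   str_count("中文ab12") -> 6 - ceil(4 / 2) = 4
# i.e. two Chinese characters plus four ASCII characters at half width each.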


def text_visual(texts, scores, img_h=400, img_w=600, threshold=0.):
    """
    Create a new blank image and draw the texts on it.
    args:
        texts(list): the texts to be drawn
        scores(list|None): corresponding score of each text
        img_h(int): the height of the blank image
        img_w(int): the width of the blank image
        threshold(float): texts with scores below this value are skipped
    return(array):
        the blank image drawn with the texts
    """
    if scores is not None:
        assert len(texts) == len(
            scores), "The number of txts and corresponding scores must match"

    def create_blank_img():
        # uint8 so the white value 255 does not overflow (int8 would wrap)
        blank_img = np.ones(shape=[img_h, img_w], dtype=np.uint8) * 255
        blank_img[:, img_w - 1:] = 0
        blank_img = Image.fromarray(blank_img).convert("RGB")
        draw_txt = ImageDraw.Draw(blank_img)
        return blank_img, draw_txt

    blank_img, draw_txt = create_blank_img()

    font_size = 20
    txt_color = (0, 0, 0)
    font = ImageFont.truetype("./doc/simfang.ttf", font_size, encoding="utf-8")

    gap = font_size + 5
    txt_img_list = []
    count, index = 1, 0
    for idx, txt in enumerate(texts):
        index += 1
        if scores[idx] < threshold or math.isnan(scores[idx]):
            index -= 1
            continue
        first_line = True
        # wrap long texts: each line holds at most img_w // font_size - 4
        # Chinese-character-width units (see str_count above)
        while str_count(txt) >= img_w // font_size - 4:
            tmp = txt
            txt = tmp[:img_w // font_size - 4]
            if first_line:
                new_txt = str(index) + ': ' + txt
                first_line = False
            else:
                new_txt = ' ' + txt
            draw_txt.text((0, gap * count), new_txt, txt_color, font=font)
            txt = tmp[img_w // font_size - 4:]
            # start a new blank column when the current one is full
            if count >= img_h // gap - 1:
                txt_img_list.append(np.array(blank_img))
                blank_img, draw_txt = create_blank_img()
                count = 0
            count += 1
        if first_line:
            new_txt = str(index) + ': ' + txt + ' ' + '%.3f' % (scores[idx])
        else:
            new_txt = " " + txt + " " + '%.3f' % (scores[idx])
        draw_txt.text((0, gap * count), new_txt, txt_color, font=font)
        # whether to start a new blank column or not
        if count >= img_h // gap - 1 and idx + 1 < len(texts):
            txt_img_list.append(np.array(blank_img))
            blank_img, draw_txt = create_blank_img()
            count = 0
        count += 1
    txt_img_list.append(np.array(blank_img))
    if len(txt_img_list) == 1:
        blank_img = np.array(txt_img_list[0])
    else:
        blank_img = np.concatenate(txt_img_list, axis=1)
    return np.array(blank_img)


if __name__ == '__main__':
    test_img = "./doc/test_v2"
    predict_txt = "./doc/predict.txt"
    with open(predict_txt, 'r') as f:
        data = f.readlines()
    img_path, anno = data[0].strip().split('\t')
    img_name = os.path.basename(img_path)
    img_path = os.path.join(test_img, img_name)
    image = Image.open(img_path)

    data = json.loads(anno)
    boxes, txts, scores = [], [], []
    for dic in data:
        boxes.append(dic['points'])
        txts.append(dic['transcription'])
        scores.append(round(dic['scores'], 3))

    new_img = draw_ocr(image, boxes, txts, scores, draw_txt=True)

    # draw_ocr works on an RGB image; flip to BGR before writing with OpenCV
    cv2.imwrite(img_name, new_img[:, :, ::-1])