merge all

parent 874a32625f
commit 7d53da943e
@@ -1,4 +1,4 @@
project(ocr_system CXX C)
project(ppocr CXX C)
option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON)
option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." OFF)

@@ -11,7 +11,7 @@ SET(CUDA_LIB "" CACHE PATH "Location of libraries")
SET(CUDNN_LIB "" CACHE PATH "Location of libraries")
SET(TENSORRT_DIR "" CACHE PATH "Compile demo with TensorRT")
set(DEMO_NAME "ocr_system")
set(DEMO_NAME "ppocr")
macro(safe_set_static_flag)

@@ -206,7 +206,7 @@ endif()
set(DEPS ${DEPS} ${OpenCV_LIBS})
AUX_SOURCE_DIRECTORY(./src_system SRCS)
AUX_SOURCE_DIRECTORY(./src SRCS)
add_executable(${DEMO_NAME} ${SRCS})
target_link_libraries(${DEMO_NAME} ${DEPS})
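Net effect of the CMake hunks above: the demo target is renamed from ocr_system to ppocr and sources are globbed from ./src instead of ./src_system, so one binary now covers detection, classification and recognition. A minimal configure-and-build sketch, assuming an out-of-source build directory; the OpenCV and Paddle inference paths that the full CMakeLists also expects are omitted here and would need to be passed as well (see the shell-script hunk near the end of this diff):

    # hypothetical sketch; only WITH_MKL / WITH_GPU / DEMO_NAME come from the hunks above
    mkdir -p build && cd build
    cmake .. -DWITH_MKL=ON -DWITH_GPU=OFF
    make -j
    # the DEMO_NAME target is now ./build/ppocr rather than ./build/ocr_system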
@@ -31,6 +31,8 @@
* *
*******************************************************************************/

#pragma once

#ifndef clipper_hpp
#define clipper_hpp
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "opencv2/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "opencv2/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
@@ -0,0 +1,336 @@
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "glog/logging.h"
#include "omp.h"
#include "opencv2/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include <chrono>
#include <iomanip>
#include <iostream>
#include <ostream>
#include <vector>

#include <cstring>
#include <fstream>
#include <numeric>

#include <glog/logging.h>
#include <include/ocr_det.h>
#include <include/ocr_cls.h>
#include <include/ocr_rec.h>
#include <sys/stat.h>

#include <gflags/gflags.h>

DEFINE_bool(use_gpu, false, "Infering with GPU or CPU.");
DEFINE_int32(gpu_id, 0, "Device id of GPU to execute.");
DEFINE_int32(gpu_mem, 4000, "GPU id when infering with GPU.");
DEFINE_int32(cpu_math_library_num_threads, 10, "Num of threads with CPU.");
DEFINE_bool(use_mkldnn, false, "Whether use mkldnn with CPU.");
DEFINE_bool(use_tensorrt, false, "Whether use tensorrt.");
DEFINE_bool(use_fp16, false, "Whether use fp16 when use tensorrt.");
// detection related
DEFINE_string(image_dir, "", "Dir of input image.");
DEFINE_string(det_model_dir, "", "Path of det inference model.");
DEFINE_int32(max_side_len, 960, "max_side_len of input image.");
DEFINE_double(det_db_thresh, 0.3, "Threshold of det_db_thresh.");
DEFINE_double(det_db_box_thresh, 0.5, "Threshold of det_db_box_thresh.");
DEFINE_double(det_db_unclip_ratio, 1.6, "Threshold of det_db_unclip_ratio.");
DEFINE_bool(use_polygon_score, false, "Whether use polygon score.");
DEFINE_bool(visualize, true, "Whether show the detection results.");
// classification related
DEFINE_bool(use_angle_cls, false, "Whether use use_angle_cls.");
DEFINE_string(cls_model_dir, "", "Path of cls inference model.");
DEFINE_double(cls_thresh, 0.9, "Threshold of cls_thresh.");
// recognition related
DEFINE_string(rec_model_dir, "", "Path of rec inference model.");
DEFINE_string(char_list_file, "../../ppocr/utils/ppocr_keys_v1.txt", "Path of dictionary.");

using namespace std;
using namespace cv;
using namespace PaddleOCR;

static bool PathExists(const std::string& path){
#ifdef _WIN32
  struct _stat buffer;
  return (_stat(path.c_str(), &buffer) == 0);
#else
  struct stat buffer;
  return (stat(path.c_str(), &buffer) == 0);
#endif // !_WIN32
}

cv::Mat GetRotateCropImage(const cv::Mat &srcimage,
                           std::vector<std::vector<int>> box) {
  cv::Mat image;
  srcimage.copyTo(image);
  std::vector<std::vector<int>> points = box;

  int x_collect[4] = {box[0][0], box[1][0], box[2][0], box[3][0]};
  int y_collect[4] = {box[0][1], box[1][1], box[2][1], box[3][1]};
  int left = int(*std::min_element(x_collect, x_collect + 4));
  int right = int(*std::max_element(x_collect, x_collect + 4));
  int top = int(*std::min_element(y_collect, y_collect + 4));
  int bottom = int(*std::max_element(y_collect, y_collect + 4));

  cv::Mat img_crop;
  image(cv::Rect(left, top, right - left, bottom - top)).copyTo(img_crop);

  for (int i = 0; i < points.size(); i++) {
    points[i][0] -= left;
    points[i][1] -= top;
  }

  int img_crop_width = int(sqrt(pow(points[0][0] - points[1][0], 2) +
                                pow(points[0][1] - points[1][1], 2)));
  int img_crop_height = int(sqrt(pow(points[0][0] - points[3][0], 2) +
                                 pow(points[0][1] - points[3][1], 2)));

  cv::Point2f pts_std[4];
  pts_std[0] = cv::Point2f(0., 0.);
  pts_std[1] = cv::Point2f(img_crop_width, 0.);
  pts_std[2] = cv::Point2f(img_crop_width, img_crop_height);
  pts_std[3] = cv::Point2f(0.f, img_crop_height);

  cv::Point2f pointsf[4];
  pointsf[0] = cv::Point2f(points[0][0], points[0][1]);
  pointsf[1] = cv::Point2f(points[1][0], points[1][1]);
  pointsf[2] = cv::Point2f(points[2][0], points[2][1]);
  pointsf[3] = cv::Point2f(points[3][0], points[3][1]);

  cv::Mat M = cv::getPerspectiveTransform(pointsf, pts_std);

  cv::Mat dst_img;
  cv::warpPerspective(img_crop, dst_img, M,
                      cv::Size(img_crop_width, img_crop_height),
                      cv::BORDER_REPLICATE);

  if (float(dst_img.rows) >= float(dst_img.cols) * 1.5) {
    cv::Mat srcCopy = cv::Mat(dst_img.rows, dst_img.cols, dst_img.depth());
    cv::transpose(dst_img, srcCopy);
    cv::flip(srcCopy, srcCopy, 0);
    return srcCopy;
  } else {
    return dst_img;
  }
}

int main_det(int argc, char **argv) {
  // Parsing command-line
  google::ParseCommandLineFlags(&argc, &argv, true);
  if (FLAGS_det_model_dir.empty() || FLAGS_image_dir.empty()) {
    std::cout << "Usage[det]: ./ppocr --det_model_dir=/PATH/TO/DET_INFERENCE_MODEL/ "
              << "--image_dir=/PATH/TO/INPUT/IMAGE/" << std::endl;
    exit(1);
  }
  if (!PathExists(FLAGS_image_dir)) {
    std::cerr << "[ERROR] image path not exist! image_dir: " << FLAGS_image_dir << endl;
    exit(1);
  }

  std::vector<cv::String> cv_all_img_names;
  cv::glob(FLAGS_image_dir, cv_all_img_names);
  std::cout << "total images num: " << cv_all_img_names.size() << endl;

  DBDetector det(FLAGS_det_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
                 FLAGS_gpu_mem, FLAGS_cpu_math_library_num_threads,
                 FLAGS_use_mkldnn, FLAGS_max_side_len, FLAGS_det_db_thresh,
                 FLAGS_det_db_box_thresh, FLAGS_det_db_unclip_ratio,
                 FLAGS_use_polygon_score, FLAGS_visualize,
                 FLAGS_use_tensorrt, FLAGS_use_fp16);

  auto start = std::chrono::system_clock::now();

  for (int i = 0; i < cv_all_img_names.size(); ++i) {
    LOG(INFO) << "The predict img: " << cv_all_img_names[i];

    cv::Mat srcimg = cv::imread(cv_all_img_names[i], cv::IMREAD_COLOR);
    if (!srcimg.data) {
      std::cerr << "[ERROR] image read failed! image path: " << cv_all_img_names[i] << endl;
      exit(1);
    }
    std::vector<std::vector<std::vector<int>>> boxes;

    det.Run(srcimg, boxes);

    auto end = std::chrono::system_clock::now();
    auto duration =
        std::chrono::duration_cast<std::chrono::microseconds>(end - start);
    std::cout << "Cost "
              << double(duration.count()) *
                     std::chrono::microseconds::period::num /
                     std::chrono::microseconds::period::den
              << "s" << std::endl;
  }

  return 0;
}

int main_rec(int argc, char **argv) {
  // Parsing command-line
  google::ParseCommandLineFlags(&argc, &argv, true);
  if (FLAGS_rec_model_dir.empty() || FLAGS_image_dir.empty()) {
    std::cout << "Usage[rec]: ./ppocr --rec_model_dir=/PATH/TO/REC_INFERENCE_MODEL/ "
              << "--image_dir=/PATH/TO/INPUT/IMAGE/" << std::endl;
    exit(1);
  }
  if (!PathExists(FLAGS_image_dir)) {
    std::cerr << "[ERROR] image path not exist! image_dir: " << FLAGS_image_dir << endl;
    exit(1);
  }

  std::vector<cv::String> cv_all_img_names;
  cv::glob(FLAGS_image_dir, cv_all_img_names);
  std::cout << "total images num: " << cv_all_img_names.size() << endl;

  CRNNRecognizer rec(FLAGS_rec_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
                     FLAGS_gpu_mem, FLAGS_cpu_math_library_num_threads,
                     FLAGS_use_mkldnn, FLAGS_char_list_file,
                     FLAGS_use_tensorrt, FLAGS_use_fp16);

  auto start = std::chrono::system_clock::now();

  for (int i = 0; i < cv_all_img_names.size(); ++i) {
    LOG(INFO) << "The predict img: " << cv_all_img_names[i];

    cv::Mat srcimg = cv::imread(cv_all_img_names[i], cv::IMREAD_COLOR);
    if (!srcimg.data) {
      std::cerr << "[ERROR] image read failed! image path: " << cv_all_img_names[i] << endl;
      exit(1);
    }

    rec.Run(srcimg);

    auto end = std::chrono::system_clock::now();
    auto duration =
        std::chrono::duration_cast<std::chrono::microseconds>(end - start);
    std::cout << "Cost "
              << double(duration.count()) *
                     std::chrono::microseconds::period::num /
                     std::chrono::microseconds::period::den
              << "s" << std::endl;
  }

  return 0;
}

int main_system(int argc, char **argv) {
  // Parsing command-line
  google::ParseCommandLineFlags(&argc, &argv, true);
  if ((FLAGS_det_model_dir.empty() || FLAGS_rec_model_dir.empty() || FLAGS_image_dir.empty()) ||
      (FLAGS_use_angle_cls && FLAGS_cls_model_dir.empty())) {
    std::cout << "Usage[system without angle cls]: ./ppocr --det_model_dir=/PATH/TO/DET_INFERENCE_MODEL/ "
              << "--rec_model_dir=/PATH/TO/REC_INFERENCE_MODEL/ "
              << "--image_dir=/PATH/TO/INPUT/IMAGE/" << std::endl;
    std::cout << "Usage[system with angle cls]: ./ppocr --det_model_dir=/PATH/TO/DET_INFERENCE_MODEL/ "
              << "--use_angle_cls=true "
              << "--cls_model_dir=/PATH/TO/CLS_INFERENCE_MODEL/ "
              << "--rec_model_dir=/PATH/TO/REC_INFERENCE_MODEL/ "
              << "--image_dir=/PATH/TO/INPUT/IMAGE/" << std::endl;
    exit(1);
  }
  if (!PathExists(FLAGS_image_dir)) {
    std::cerr << "[ERROR] image path not exist! image_dir: " << FLAGS_image_dir << endl;
    exit(1);
  }

  std::vector<cv::String> cv_all_img_names;
  cv::glob(FLAGS_image_dir, cv_all_img_names);
  std::cout << "total images num: " << cv_all_img_names.size() << endl;

  DBDetector det(FLAGS_det_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
                 FLAGS_gpu_mem, FLAGS_cpu_math_library_num_threads,
                 FLAGS_use_mkldnn, FLAGS_max_side_len, FLAGS_det_db_thresh,
                 FLAGS_det_db_box_thresh, FLAGS_det_db_unclip_ratio,
                 FLAGS_use_polygon_score, FLAGS_visualize,
                 FLAGS_use_tensorrt, FLAGS_use_fp16);

  Classifier *cls = nullptr;
  if (FLAGS_use_angle_cls) {
    cls = new Classifier(FLAGS_cls_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
                         FLAGS_gpu_mem, FLAGS_cpu_math_library_num_threads,
                         FLAGS_use_mkldnn, FLAGS_cls_thresh,
                         FLAGS_use_tensorrt, FLAGS_use_fp16);
  }

  CRNNRecognizer rec(FLAGS_rec_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
                     FLAGS_gpu_mem, FLAGS_cpu_math_library_num_threads,
                     FLAGS_use_mkldnn, FLAGS_char_list_file,
                     FLAGS_use_tensorrt, FLAGS_use_fp16);

  auto start = std::chrono::system_clock::now();

  for (int i = 0; i < cv_all_img_names.size(); ++i) {
    LOG(INFO) << "The predict img: " << cv_all_img_names[i];

    cv::Mat srcimg = cv::imread(cv_all_img_names[i], cv::IMREAD_COLOR);
    if (!srcimg.data) {
      std::cerr << "[ERROR] image read failed! image path: " << cv_all_img_names[i] << endl;
      exit(1);
    }
    std::vector<std::vector<std::vector<int>>> boxes;

    det.Run(srcimg, boxes);

    cv::Mat crop_img;
    for (int j = 0; j < boxes.size(); j++) {
      crop_img = GetRotateCropImage(srcimg, boxes[j]);

      if (cls != nullptr) {
        crop_img = cls->Run(crop_img);
      }
      rec.Run(crop_img);
    }

    auto end = std::chrono::system_clock::now();
    auto duration =
        std::chrono::duration_cast<std::chrono::microseconds>(end - start);
    std::cout << "Cost "
              << double(duration.count()) *
                     std::chrono::microseconds::period::num /
                     std::chrono::microseconds::period::den
              << "s" << std::endl;
  }

  return 0;
}

int main(int argc, char **argv) {
  if (argc < 2 || (strcmp(argv[1], "det")!=0 && strcmp(argv[1], "rec")!=0 && strcmp(argv[1], "system")!=0)) {
    std::cout << "Please choose one mode of [det, rec, system] !" << std::endl;
    return -1;
  }
  std::cout << "mode: " << argv[1] << endl;

  if (strcmp(argv[1], "det")==0) {
    return main_det(argc, argv);
  }
  if (strcmp(argv[1], "rec")==0) {
    return main_rec(argc, argv);
  }
  if (strcmp(argv[1], "system")==0) {
    return main_system(argc, argv);
  }

  // return 0;
}
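The merged entry point above dispatches on the first positional argument, so one binary replaces the former ocr_det, ocr_rec and ocr_system demos. An illustrative invocation sketch; only the flag names come from the DEFINE_* lines in this file, and the model directories are placeholders:

    # mode is one of det / rec / system
    ./build/ppocr det    --det_model_dir=/PATH/TO/DET_INFERENCE_MODEL/ --image_dir=/PATH/TO/INPUT/IMAGE/
    ./build/ppocr rec    --rec_model_dir=/PATH/TO/REC_INFERENCE_MODEL/ --image_dir=/PATH/TO/INPUT/IMAGE/
    ./build/ppocr system --det_model_dir=/PATH/TO/DET_INFERENCE_MODEL/ \
                         --use_angle_cls=true --cls_model_dir=/PATH/TO/CLS_INFERENCE_MODEL/ \
                         --rec_model_dir=/PATH/TO/REC_INFERENCE_MODEL/ \
                         --image_dir=/PATH/TO/INPUT/IMAGE/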
@@ -13,8 +13,7 @@
// limitations under the License.

#include <include/ocr_det.h>
#include <include/preprocess_op.cpp>
#include <include/postprocess_op.cpp>

namespace PaddleOCR {
@@ -1,120 +0,0 @@
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "glog/logging.h"
#include "omp.h"
#include "opencv2/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include <chrono>
#include <iomanip>
#include <iostream>
#include <ostream>
#include <vector>

#include <cstring>
#include <fstream>
#include <numeric>

#include <glog/logging.h>
#include <include/ocr_det.h>
#include <sys/stat.h>

#include <gflags/gflags.h>

DEFINE_bool(use_gpu, false, "Infering with GPU or CPU.");
DEFINE_int32(gpu_id, 0, "Device id of GPU to execute.");
DEFINE_int32(gpu_mem, 4000, "GPU id when infering with GPU.");
DEFINE_int32(cpu_math_library_num_threads, 10, "Num of threads with CPU.");
DEFINE_bool(use_mkldnn, false, "Whether use mkldnn with CPU.");

DEFINE_string(image_dir, "", "Dir of input image.");
DEFINE_string(det_model_dir, "", "Path of det inference model.");
DEFINE_int32(max_side_len, 960, "max_side_len of input image.");
DEFINE_double(det_db_thresh, 0.3, "Threshold of det_db_thresh.");
DEFINE_double(det_db_box_thresh, 0.5, "Threshold of det_db_box_thresh.");
DEFINE_double(det_db_unclip_ratio, 1.6, "Threshold of det_db_unclip_ratio.");
DEFINE_bool(use_polygon_score, false, "Whether use polygon score.");
DEFINE_bool(visualize, true, "Whether show the detection results.");

DEFINE_bool(use_tensorrt, false, "Whether use tensorrt.");
DEFINE_bool(use_fp16, false, "Whether use fp16 when use tensorrt.");

using namespace std;
using namespace cv;
using namespace PaddleOCR;

static bool PathExists(const std::string& path){
#ifdef _WIN32
  struct _stat buffer;
  return (_stat(path.c_str(), &buffer) == 0);
#else
  struct stat buffer;
  return (stat(path.c_str(), &buffer) == 0);
#endif // !_WIN32
}

int main(int argc, char **argv) {
  // Parsing command-line
  google::ParseCommandLineFlags(&argc, &argv, true);
  if (FLAGS_det_model_dir.empty() || FLAGS_image_dir.empty()) {
    std::cout << "Usage: ./ocr_det --det_model_dir=/PATH/TO/INFERENCE_MODEL/ "
              << "--image_dir=/PATH/TO/INPUT/IMAGE/" << std::endl;
    return -1;
  }

  if (!PathExists(FLAGS_image_dir)) {
    std::cerr << "[ERROR] image path not exist! image_dir: " << FLAGS_image_dir << endl;
    exit(1);
  }
  std::vector<cv::String> cv_all_img_names;
  cv::glob(FLAGS_image_dir, cv_all_img_names);
  std::cout << "total images num: " << cv_all_img_names.size() << endl;

  DBDetector det(FLAGS_det_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
                 FLAGS_gpu_mem, FLAGS_cpu_math_library_num_threads,
                 FLAGS_use_mkldnn, FLAGS_max_side_len, FLAGS_det_db_thresh,
                 FLAGS_det_db_box_thresh, FLAGS_det_db_unclip_ratio,
                 FLAGS_use_polygon_score, FLAGS_visualize,
                 FLAGS_use_tensorrt, FLAGS_use_fp16);

  auto start = std::chrono::system_clock::now();

  for (int i = 0; i < cv_all_img_names.size(); ++i) {
    LOG(INFO) << "The predict img: " << cv_all_img_names[i];

    cv::Mat srcimg = cv::imread(cv_all_img_names[i], cv::IMREAD_COLOR);
    if (!srcimg.data) {
      std::cerr << "[ERROR] image read failed! image path: " << cv_all_img_names[i] << endl;
      exit(1);
    }
    std::vector<std::vector<std::vector<int>>> boxes;

    det.Run(srcimg, boxes);

    auto end = std::chrono::system_clock::now();
    auto duration =
        std::chrono::duration_cast<std::chrono::microseconds>(end - start);
    std::cout << "Cost "
              << double(duration.count()) *
                     std::chrono::microseconds::period::num /
                     std::chrono::microseconds::period::den
              << "s" << std::endl;
  }

  return 0;
}
@@ -1,112 +0,0 @@
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "glog/logging.h"
#include "omp.h"
#include "opencv2/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include <chrono>
#include <iomanip>
#include <iostream>
#include <ostream>
#include <vector>

#include <cstring>
#include <fstream>
#include <numeric>

#include <glog/logging.h>
#include <include/ocr_rec.h>
#include <sys/stat.h>

#include <gflags/gflags.h>

DEFINE_bool(use_gpu, false, "Infering with GPU or CPU.");
DEFINE_int32(gpu_id, 0, "Device id of GPU to execute.");
DEFINE_int32(gpu_mem, 4000, "GPU id when infering with GPU.");
DEFINE_int32(cpu_math_library_num_threads, 10, "Num of threads with CPU.");
DEFINE_bool(use_mkldnn, false, "Whether use mkldnn with CPU.");

DEFINE_string(image_dir, "", "Dir of input image.");
DEFINE_string(rec_model_dir, "", "Path of rec inference model.");
DEFINE_string(char_list_file, "../../ppocr/utils/ppocr_keys_v1.txt", "Path of dictionary.");

DEFINE_bool(use_tensorrt, false, "Whether use tensorrt.");
DEFINE_bool(use_fp16, false, "Whether use fp16 when use tensorrt.");

using namespace std;
using namespace cv;
using namespace PaddleOCR;

static bool PathExists(const std::string& path){
#ifdef _WIN32
  struct _stat buffer;
  return (_stat(path.c_str(), &buffer) == 0);
#else
  struct stat buffer;
  return (stat(path.c_str(), &buffer) == 0);
#endif // !_WIN32
}

int main(int argc, char **argv) {
  // Parsing command-line
  google::ParseCommandLineFlags(&argc, &argv, true);
  if (FLAGS_rec_model_dir.empty() || FLAGS_image_dir.empty()) {
    std::cout << "Usage: ./ocr_rec --rec_model_dir=/PATH/TO/INFERENCE_MODEL/ "
              << "--image_dir=/PATH/TO/INPUT/IMAGE/" << std::endl;
    return -1;
  }

  if (!PathExists(FLAGS_image_dir)) {
    std::cerr << "[ERROR] image path not exist! image_dir: " << FLAGS_image_dir << endl;
    exit(1);
  }
  std::vector<cv::String> cv_all_img_names;
  cv::glob(FLAGS_image_dir, cv_all_img_names);
  std::cout << "total images num: " << cv_all_img_names.size() << endl;

  CRNNRecognizer rec(FLAGS_rec_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
                     FLAGS_gpu_mem, FLAGS_cpu_math_library_num_threads,
                     FLAGS_use_mkldnn, FLAGS_char_list_file,
                     FLAGS_use_tensorrt, FLAGS_use_fp16);

  auto start = std::chrono::system_clock::now();

  for (int i = 0; i < cv_all_img_names.size(); ++i) {
    LOG(INFO) << "The predict img: " << cv_all_img_names[i];

    cv::Mat srcimg = cv::imread(cv_all_img_names[i], cv::IMREAD_COLOR);
    if (!srcimg.data) {
      std::cerr << "[ERROR] image read failed! image path: " << cv_all_img_names[i] << endl;
      exit(1);
    }

    rec.Run(srcimg);

    auto end = std::chrono::system_clock::now();
    auto duration =
        std::chrono::duration_cast<std::chrono::microseconds>(end - start);
    std::cout << "Cost "
              << double(duration.count()) *
                     std::chrono::microseconds::period::num /
                     std::chrono::microseconds::period::den
              << "s" << std::endl;
  }

  return 0;
}
@@ -1,185 +0,0 @@
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <include/ocr_rec.h>
#include <include/preprocess_op.cpp>

namespace PaddleOCR {

void CRNNRecognizer::Run(cv::Mat &img) {
  cv::Mat srcimg;
  img.copyTo(srcimg);
  cv::Mat resize_img;

  float wh_ratio = float(srcimg.cols) / float(srcimg.rows);

  this->resize_op_.Run(srcimg, resize_img, wh_ratio, this->use_tensorrt_);

  this->normalize_op_.Run(&resize_img, this->mean_, this->scale_,
                          this->is_scale_);

  std::vector<float> input(1 * 3 * resize_img.rows * resize_img.cols, 0.0f);

  this->permute_op_.Run(&resize_img, input.data());

  // Inference.
  auto input_names = this->predictor_->GetInputNames();
  auto input_t = this->predictor_->GetInputHandle(input_names[0]);
  input_t->Reshape({1, 3, resize_img.rows, resize_img.cols});
  input_t->CopyFromCpu(input.data());
  this->predictor_->Run();

  std::vector<float> predict_batch;
  auto output_names = this->predictor_->GetOutputNames();
  auto output_t = this->predictor_->GetOutputHandle(output_names[0]);
  auto predict_shape = output_t->shape();

  int out_num = std::accumulate(predict_shape.begin(), predict_shape.end(), 1,
                                std::multiplies<int>());
  predict_batch.resize(out_num);

  output_t->CopyToCpu(predict_batch.data());

  // ctc decode
  std::vector<std::string> str_res;
  int argmax_idx;
  int last_index = 0;
  float score = 0.f;
  int count = 0;
  float max_value = 0.0f;

  for (int n = 0; n < predict_shape[1]; n++) {
    argmax_idx =
        int(Utility::argmax(&predict_batch[n * predict_shape[2]],
                            &predict_batch[(n + 1) * predict_shape[2]]));
    max_value =
        float(*std::max_element(&predict_batch[n * predict_shape[2]],
                                &predict_batch[(n + 1) * predict_shape[2]]));

    if (argmax_idx > 0 && (!(n > 0 && argmax_idx == last_index))) {
      score += max_value;
      count += 1;
      str_res.push_back(label_list_[argmax_idx]);
    }
    last_index = argmax_idx;
  }
  score /= count;
  for (int i = 0; i < str_res.size(); i++) {
    std::cout << str_res[i];
  }
  std::cout << "\tscore: " << score << std::endl;
}

void CRNNRecognizer::LoadModel(const std::string &model_dir) {
  // AnalysisConfig config;
  paddle_infer::Config config;
  config.SetModel(model_dir + "/inference.pdmodel",
                  model_dir + "/inference.pdiparams");

  if (this->use_gpu_) {
    config.EnableUseGpu(this->gpu_mem_, this->gpu_id_);
    if (this->use_tensorrt_) {
      config.EnableTensorRtEngine(
          1 << 20, 10, 3,
          this->use_fp16_ ? paddle_infer::Config::Precision::kHalf
                          : paddle_infer::Config::Precision::kFloat32,
          false, false);
      std::map<std::string, std::vector<int>> min_input_shape = {
          {"x", {1, 3, 32, 10}}};
      std::map<std::string, std::vector<int>> max_input_shape = {
          {"x", {1, 3, 32, 2000}}};
      std::map<std::string, std::vector<int>> opt_input_shape = {
          {"x", {1, 3, 32, 320}}};

      config.SetTRTDynamicShapeInfo(min_input_shape, max_input_shape,
                                    opt_input_shape);
    }
  } else {
    config.DisableGpu();
    if (this->use_mkldnn_) {
      config.EnableMKLDNN();
      // cache 10 different shapes for mkldnn to avoid memory leak
      config.SetMkldnnCacheCapacity(10);
    }
    config.SetCpuMathLibraryNumThreads(this->cpu_math_library_num_threads_);
  }

  config.SwitchUseFeedFetchOps(false);
  // true for multiple input
  config.SwitchSpecifyInputNames(true);

  config.SwitchIrOptim(true);

  config.EnableMemoryOptim();
  config.DisableGlogInfo();

  this->predictor_ = CreatePredictor(config);
}

cv::Mat CRNNRecognizer::GetRotateCropImage(const cv::Mat &srcimage,
                                           std::vector<std::vector<int>> box) {
  cv::Mat image;
  srcimage.copyTo(image);
  std::vector<std::vector<int>> points = box;

  int x_collect[4] = {box[0][0], box[1][0], box[2][0], box[3][0]};
  int y_collect[4] = {box[0][1], box[1][1], box[2][1], box[3][1]};
  int left = int(*std::min_element(x_collect, x_collect + 4));
  int right = int(*std::max_element(x_collect, x_collect + 4));
  int top = int(*std::min_element(y_collect, y_collect + 4));
  int bottom = int(*std::max_element(y_collect, y_collect + 4));

  cv::Mat img_crop;
  image(cv::Rect(left, top, right - left, bottom - top)).copyTo(img_crop);

  for (int i = 0; i < points.size(); i++) {
    points[i][0] -= left;
    points[i][1] -= top;
  }

  int img_crop_width = int(sqrt(pow(points[0][0] - points[1][0], 2) +
                                pow(points[0][1] - points[1][1], 2)));
  int img_crop_height = int(sqrt(pow(points[0][0] - points[3][0], 2) +
                                 pow(points[0][1] - points[3][1], 2)));

  cv::Point2f pts_std[4];
  pts_std[0] = cv::Point2f(0., 0.);
  pts_std[1] = cv::Point2f(img_crop_width, 0.);
  pts_std[2] = cv::Point2f(img_crop_width, img_crop_height);
  pts_std[3] = cv::Point2f(0.f, img_crop_height);

  cv::Point2f pointsf[4];
  pointsf[0] = cv::Point2f(points[0][0], points[0][1]);
  pointsf[1] = cv::Point2f(points[1][0], points[1][1]);
  pointsf[2] = cv::Point2f(points[2][0], points[2][1]);
  pointsf[3] = cv::Point2f(points[3][0], points[3][1]);

  cv::Mat M = cv::getPerspectiveTransform(pointsf, pts_std);

  cv::Mat dst_img;
  cv::warpPerspective(img_crop, dst_img, M,
                      cv::Size(img_crop_width, img_crop_height),
                      cv::BORDER_REPLICATE);

  if (float(dst_img.rows) >= float(dst_img.cols) * 1.5) {
    cv::Mat srcCopy = cv::Mat(dst_img.rows, dst_img.cols, dst_img.depth());
    cv::transpose(dst_img, srcCopy);
    cv::flip(srcCopy, srcCopy, 0);
    return srcCopy;
  } else {
    return dst_img;
  }
}

} // namespace PaddleOCR
@@ -1,213 +0,0 @@
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "glog/logging.h"
#include "omp.h"
#include "opencv2/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include <chrono>
#include <iomanip>
#include <iostream>
#include <ostream>
#include <vector>

#include <cstring>
#include <fstream>
#include <numeric>

#include <glog/logging.h>
#include <include/ocr_det.h>
#include <include/ocr_rec.h>
#include <sys/stat.h>

#include <gflags/gflags.h>

DEFINE_bool(use_gpu, false, "Infering with GPU or CPU.");
DEFINE_int32(gpu_id, 0, "Device id of GPU to execute.");
DEFINE_int32(gpu_mem, 4000, "GPU id when infering with GPU.");
DEFINE_int32(cpu_math_library_num_threads, 10, "Num of threads with CPU.");
DEFINE_bool(use_mkldnn, false, "Whether use mkldnn with CPU.");

DEFINE_string(image_dir, "", "Dir of input image.");
DEFINE_string(det_model_dir, "", "Path of det inference model.");
DEFINE_int32(max_side_len, 960, "max_side_len of input image.");
DEFINE_double(det_db_thresh, 0.3, "Threshold of det_db_thresh.");
DEFINE_double(det_db_box_thresh, 0.5, "Threshold of det_db_box_thresh.");
DEFINE_double(det_db_unclip_ratio, 1.6, "Threshold of det_db_unclip_ratio.");
DEFINE_bool(use_polygon_score, false, "Whether use polygon score.");
DEFINE_bool(visualize, true, "Whether show the detection results.");

DEFINE_bool(use_angle_cls, false, "Whether use use_angle_cls.");
DEFINE_string(cls_model_dir, "", "Path of cls inference model.");
DEFINE_double(cls_thresh, 0.9, "Threshold of cls_thresh.");

DEFINE_string(rec_model_dir, "", "Path of rec inference model.");
DEFINE_string(char_list_file, "../../ppocr/utils/ppocr_keys_v1.txt", "Path of dictionary.");

DEFINE_bool(use_tensorrt, false, "Whether use tensorrt.");
DEFINE_bool(use_fp16, false, "Whether use fp16 when use tensorrt.");

using namespace std;
using namespace cv;
using namespace PaddleOCR;

static bool PathExists(const std::string& path){
#ifdef _WIN32
  struct _stat buffer;
  return (_stat(path.c_str(), &buffer) == 0);
#else
  struct stat buffer;
  return (stat(path.c_str(), &buffer) == 0);
#endif // !_WIN32
}

cv::Mat GetRotateCropImage(const cv::Mat &srcimage,
                           std::vector<std::vector<int>> box) {
  cv::Mat image;
  srcimage.copyTo(image);
  std::vector<std::vector<int>> points = box;

  int x_collect[4] = {box[0][0], box[1][0], box[2][0], box[3][0]};
  int y_collect[4] = {box[0][1], box[1][1], box[2][1], box[3][1]};
  int left = int(*std::min_element(x_collect, x_collect + 4));
  int right = int(*std::max_element(x_collect, x_collect + 4));
  int top = int(*std::min_element(y_collect, y_collect + 4));
  int bottom = int(*std::max_element(y_collect, y_collect + 4));

  cv::Mat img_crop;
  image(cv::Rect(left, top, right - left, bottom - top)).copyTo(img_crop);

  for (int i = 0; i < points.size(); i++) {
    points[i][0] -= left;
    points[i][1] -= top;
  }

  int img_crop_width = int(sqrt(pow(points[0][0] - points[1][0], 2) +
                                pow(points[0][1] - points[1][1], 2)));
  int img_crop_height = int(sqrt(pow(points[0][0] - points[3][0], 2) +
                                 pow(points[0][1] - points[3][1], 2)));

  cv::Point2f pts_std[4];
  pts_std[0] = cv::Point2f(0., 0.);
  pts_std[1] = cv::Point2f(img_crop_width, 0.);
  pts_std[2] = cv::Point2f(img_crop_width, img_crop_height);
  pts_std[3] = cv::Point2f(0.f, img_crop_height);

  cv::Point2f pointsf[4];
  pointsf[0] = cv::Point2f(points[0][0], points[0][1]);
  pointsf[1] = cv::Point2f(points[1][0], points[1][1]);
  pointsf[2] = cv::Point2f(points[2][0], points[2][1]);
  pointsf[3] = cv::Point2f(points[3][0], points[3][1]);

  cv::Mat M = cv::getPerspectiveTransform(pointsf, pts_std);

  cv::Mat dst_img;
  cv::warpPerspective(img_crop, dst_img, M,
                      cv::Size(img_crop_width, img_crop_height),
                      cv::BORDER_REPLICATE);

  if (float(dst_img.rows) >= float(dst_img.cols) * 1.5) {
    cv::Mat srcCopy = cv::Mat(dst_img.rows, dst_img.cols, dst_img.depth());
    cv::transpose(dst_img, srcCopy);
    cv::flip(srcCopy, srcCopy, 0);
    return srcCopy;
  } else {
    return dst_img;
  }
}

int main(int argc, char **argv) {
  // Parsing command-line
  google::ParseCommandLineFlags(&argc, &argv, true);
  if ((FLAGS_det_model_dir.empty() || FLAGS_rec_model_dir.empty() || FLAGS_image_dir.empty()) ||
      (FLAGS_use_angle_cls && FLAGS_cls_model_dir.empty())) {
    std::cout << "Usage[default]: ./ocr_system --det_model_dir=/PATH/TO/DET_INFERENCE_MODEL/ "
              << "--rec_model_dir=/PATH/TO/REC_INFERENCE_MODEL/ "
              << "--image_dir=/PATH/TO/INPUT/IMAGE/" << std::endl;
    std::cout << "Usage[use angle cls]: ./ocr_system --det_model_dir=/PATH/TO/DET_INFERENCE_MODEL/ "
              << "--use_angle_cls=true "
              << "--cls_model_dir=/PATH/TO/CLS_INFERENCE_MODEL/ "
              << "--rec_model_dir=/PATH/TO/REC_INFERENCE_MODEL/ "
              << "--image_dir=/PATH/TO/INPUT/IMAGE/" << std::endl;
    return -1;
  }

  if (!PathExists(FLAGS_image_dir)) {
    std::cerr << "[ERROR] image path not exist! image_dir: " << FLAGS_image_dir << endl;
    exit(1);
  }
  std::vector<cv::String> cv_all_img_names;
  cv::glob(FLAGS_image_dir, cv_all_img_names);
  std::cout << "total images num: " << cv_all_img_names.size() << endl;

  DBDetector det(FLAGS_det_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
                 FLAGS_gpu_mem, FLAGS_cpu_math_library_num_threads,
                 FLAGS_use_mkldnn, FLAGS_max_side_len, FLAGS_det_db_thresh,
                 FLAGS_det_db_box_thresh, FLAGS_det_db_unclip_ratio,
                 FLAGS_use_polygon_score, FLAGS_visualize,
                 FLAGS_use_tensorrt, FLAGS_use_fp16);

  Classifier *cls = nullptr;
  if (FLAGS_use_angle_cls) {
    cls = new Classifier(FLAGS_cls_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
                         FLAGS_gpu_mem, FLAGS_cpu_math_library_num_threads,
                         FLAGS_use_mkldnn, FLAGS_cls_thresh,
                         FLAGS_use_tensorrt, FLAGS_use_fp16);
  }

  CRNNRecognizer rec(FLAGS_rec_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
                     FLAGS_gpu_mem, FLAGS_cpu_math_library_num_threads,
                     FLAGS_use_mkldnn, FLAGS_char_list_file,
                     FLAGS_use_tensorrt, FLAGS_use_fp16);

  auto start = std::chrono::system_clock::now();

  for (int i = 0; i < cv_all_img_names.size(); ++i) {
    LOG(INFO) << "The predict img: " << cv_all_img_names[i];

    cv::Mat srcimg = cv::imread(FLAGS_image_dir, cv::IMREAD_COLOR);
    if (!srcimg.data) {
      std::cerr << "[ERROR] image read failed! image path: " << cv_all_img_names[i] << endl;
      exit(1);
    }
    std::vector<std::vector<std::vector<int>>> boxes;

    det.Run(srcimg, boxes);

    cv::Mat crop_img;
    for (int j = 0; j < boxes.size(); j++) {
      crop_img = GetRotateCropImage(srcimg, boxes[j]);

      if (cls != nullptr) {
        crop_img = cls->Run(crop_img);
      }
      rec.Run(crop_img);
    }

    auto end = std::chrono::system_clock::now();
    auto duration =
        std::chrono::duration_cast<std::chrono::microseconds>(end - start);
    std::cout << "Cost "
              << double(duration.count()) *
                     std::chrono::microseconds::period::num /
                     std::chrono::microseconds::period::den
              << "s" << std::endl;
  }

  return 0;
}
@@ -1,163 +0,0 @@
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <include/ocr_det.h>
#include <include/preprocess_op.cpp>
#include <include/postprocess_op.cpp>

namespace PaddleOCR {

void DBDetector::LoadModel(const std::string &model_dir) {
  // AnalysisConfig config;
  paddle_infer::Config config;
  config.SetModel(model_dir + "/inference.pdmodel",
                  model_dir + "/inference.pdiparams");

  if (this->use_gpu_) {
    config.EnableUseGpu(this->gpu_mem_, this->gpu_id_);
    if (this->use_tensorrt_) {
      config.EnableTensorRtEngine(
          1 << 20, 10, 3,
          this->use_fp16_ ? paddle_infer::Config::Precision::kHalf
                          : paddle_infer::Config::Precision::kFloat32,
          false, false);
      std::map<std::string, std::vector<int>> min_input_shape = {
          {"x", {1, 3, 50, 50}},
          {"conv2d_92.tmp_0", {1, 96, 20, 20}},
          {"conv2d_91.tmp_0", {1, 96, 10, 10}},
          {"nearest_interp_v2_1.tmp_0", {1, 96, 10, 10}},
          {"nearest_interp_v2_2.tmp_0", {1, 96, 20, 20}},
          {"nearest_interp_v2_3.tmp_0", {1, 24, 20, 20}},
          {"nearest_interp_v2_4.tmp_0", {1, 24, 20, 20}},
          {"nearest_interp_v2_5.tmp_0", {1, 24, 20, 20}},
          {"elementwise_add_7", {1, 56, 2, 2}},
          {"nearest_interp_v2_0.tmp_0", {1, 96, 2, 2}}};
      std::map<std::string, std::vector<int>> max_input_shape = {
          {"x", {1, 3, this->max_side_len_, this->max_side_len_}},
          {"conv2d_92.tmp_0", {1, 96, 400, 400}},
          {"conv2d_91.tmp_0", {1, 96, 200, 200}},
          {"nearest_interp_v2_1.tmp_0", {1, 96, 200, 200}},
          {"nearest_interp_v2_2.tmp_0", {1, 96, 400, 400}},
          {"nearest_interp_v2_3.tmp_0", {1, 24, 400, 400}},
          {"nearest_interp_v2_4.tmp_0", {1, 24, 400, 400}},
          {"nearest_interp_v2_5.tmp_0", {1, 24, 400, 400}},
          {"elementwise_add_7", {1, 56, 400, 400}},
          {"nearest_interp_v2_0.tmp_0", {1, 96, 400, 400}}};
      std::map<std::string, std::vector<int>> opt_input_shape = {
          {"x", {1, 3, 640, 640}},
          {"conv2d_92.tmp_0", {1, 96, 160, 160}},
          {"conv2d_91.tmp_0", {1, 96, 80, 80}},
          {"nearest_interp_v2_1.tmp_0", {1, 96, 80, 80}},
          {"nearest_interp_v2_2.tmp_0", {1, 96, 160, 160}},
          {"nearest_interp_v2_3.tmp_0", {1, 24, 160, 160}},
          {"nearest_interp_v2_4.tmp_0", {1, 24, 160, 160}},
          {"nearest_interp_v2_5.tmp_0", {1, 24, 160, 160}},
          {"elementwise_add_7", {1, 56, 40, 40}},
          {"nearest_interp_v2_0.tmp_0", {1, 96, 40, 40}}};

      config.SetTRTDynamicShapeInfo(min_input_shape, max_input_shape,
                                    opt_input_shape);
    }
  } else {
    config.DisableGpu();
    if (this->use_mkldnn_) {
      config.EnableMKLDNN();
      // cache 10 different shapes for mkldnn to avoid memory leak
      config.SetMkldnnCacheCapacity(10);
    }
    config.SetCpuMathLibraryNumThreads(this->cpu_math_library_num_threads_);
  }
  // use zero_copy_run as default
  config.SwitchUseFeedFetchOps(false);
  // true for multiple input
  config.SwitchSpecifyInputNames(true);

  config.SwitchIrOptim(true);

  config.EnableMemoryOptim();
  // config.DisableGlogInfo();

  this->predictor_ = CreatePredictor(config);
}

void DBDetector::Run(cv::Mat &img,
                     std::vector<std::vector<std::vector<int>>> &boxes) {
  float ratio_h{};
  float ratio_w{};

  cv::Mat srcimg;
  cv::Mat resize_img;
  img.copyTo(srcimg);
  this->resize_op_.Run(img, resize_img, this->max_side_len_, ratio_h, ratio_w,
                       this->use_tensorrt_);

  this->normalize_op_.Run(&resize_img, this->mean_, this->scale_,
                          this->is_scale_);

  std::vector<float> input(1 * 3 * resize_img.rows * resize_img.cols, 0.0f);
  this->permute_op_.Run(&resize_img, input.data());

  // Inference.
  auto input_names = this->predictor_->GetInputNames();
  auto input_t = this->predictor_->GetInputHandle(input_names[0]);
  input_t->Reshape({1, 3, resize_img.rows, resize_img.cols});
  input_t->CopyFromCpu(input.data());
  this->predictor_->Run();

  std::vector<float> out_data;
  auto output_names = this->predictor_->GetOutputNames();
  auto output_t = this->predictor_->GetOutputHandle(output_names[0]);
  std::vector<int> output_shape = output_t->shape();
  int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1,
                                std::multiplies<int>());

  out_data.resize(out_num);
  output_t->CopyToCpu(out_data.data());

  int n2 = output_shape[2];
  int n3 = output_shape[3];
  int n = n2 * n3;

  std::vector<float> pred(n, 0.0);
  std::vector<unsigned char> cbuf(n, ' ');

  for (int i = 0; i < n; i++) {
    pred[i] = float(out_data[i]);
    cbuf[i] = (unsigned char)((out_data[i]) * 255);
  }

  cv::Mat cbuf_map(n2, n3, CV_8UC1, (unsigned char *)cbuf.data());
  cv::Mat pred_map(n2, n3, CV_32F, (float *)pred.data());

  const double threshold = this->det_db_thresh_ * 255;
  const double maxvalue = 255;
  cv::Mat bit_map;
  cv::threshold(cbuf_map, bit_map, threshold, maxvalue, cv::THRESH_BINARY);
  cv::Mat dilation_map;
  cv::Mat dila_ele = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(2, 2));
  cv::dilate(bit_map, dilation_map, dila_ele);
  boxes = post_processor_.BoxesFromBitmap(
      pred_map, dilation_map, this->det_db_box_thresh_,
      this->det_db_unclip_ratio_, this->use_polygon_score_);

  boxes = post_processor_.FilterTagDetRes(boxes, ratio_h, ratio_w, srcimg);
  std::cout << "Detected boxes num: " << boxes.size() << endl;

  //// visualization
  if (this->visualize_) {
    Utility::VisualizeBboxes(srcimg, boxes);
  }
}

} // namespace PaddleOCR
@@ -1,16 +1,3 @@
set -o errexit

if [ $# != 1 ] ; then
    echo "USAGE: $0 MODE (one of ['det', 'rec', 'system'])"
    echo " e.g.: $0 system"
    exit 1;
fi

# MODE be one of ['det', 'rec', 'system']
MODE=$1
cp CMakeLists_$MODE.txt CMakeLists.txt

OPENCV_DIR=/paddle/git/new/PaddleOCR/deploy/cpp_infer/opencv-3.4.7/opencv3/
LIB_DIR=/paddle/git/new/PaddleOCR/deploy/cpp_infer/paddle_inference/
CUDA_LIB_DIR=/usr/local/cuda/lib64/
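With the MODE argument and the per-mode CMakeLists copies removed from this build script, the det/rec/system choice moves from build time to run time. A hedged before/after sketch; the exact new script invocation is not shown in this hunk, so the "after" lines are assumptions based on the merged main earlier in this diff:

    # before: one binary per mode, selected when building
    sh tools/build.sh system        # copied CMakeLists_system.txt and built ./build/ocr_system
    # after: build once, pick the mode on the command line
    sh tools/build.sh
    ./build/ppocr system --det_model_dir=... --rec_model_dir=... --image_dir=...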
@@ -1,31 +0,0 @@
# model load config
use_gpu 0
gpu_id 0
gpu_mem 4000
cpu_math_library_num_threads 10
use_mkldnn 0

# det config
max_side_len 960
det_db_thresh 0.3
det_db_box_thresh 0.5
det_db_unclip_ratio 1.6
use_polygon_score 1
det_model_dir ./inference/ch_ppocr_mobile_v2.0_det_infer/

# cls config
use_angle_cls 0
cls_model_dir ./inference/ch_ppocr_mobile_v2.0_cls_infer/
cls_thresh 0.9

# rec config
rec_model_dir ./inference/ch_ppocr_mobile_v2.0_rec_infer/
char_list_file ../../ppocr/utils/ppocr_keys_v1.txt

# show the detection results
visualize 0

# use_tensorrt
use_tensorrt 0
use_fp16 0
@@ -1,2 +0,0 @@

./build/ocr_system ./tools/config.txt ../../doc/imgs/12.jpg
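The deleted config file (referenced as ./tools/config.txt in the run command removed just above) is superseded by gflags: every key it contained has a matching DEFINE_* flag in the merged main. An illustrative flag-based equivalent of the deleted command, reusing the same model directories and test image; the invocation form is assumed, the values come from the removed config file:

    ./build/ppocr system \
        --use_gpu=false --cpu_math_library_num_threads=10 \
        --det_db_thresh=0.3 --det_db_box_thresh=0.5 --det_db_unclip_ratio=1.6 \
        --det_model_dir=./inference/ch_ppocr_mobile_v2.0_det_infer/ \
        --rec_model_dir=./inference/ch_ppocr_mobile_v2.0_rec_infer/ \
        --char_list_file=../../ppocr/utils/ppocr_keys_v1.txt \
        --image_dir=../../doc/imgs/12.jpg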