rename rec_resnet_fpn
parent 97cfef3265
commit 5edb619cdd
@@ -27,7 +27,7 @@ Architecture:
   function: ppocr.modeling.architectures.rec_model,RecModel
 
 Backbone:
-  function: ppocr.modeling.backbones.rec_resnet50_fpn,ResNet
+  function: ppocr.modeling.backbones.rec_resnet_fpn,ResNet
   layers: 50
 
 Head:
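The function: entry is a "module,Class" string, so the rename only changes the module path; the class name ResNet is unchanged. A minimal sketch of how such a string could be resolved, assuming ppocr is importable (resolve_function is an illustrative helper, not necessarily PaddleOCR's own loader):

import importlib

def resolve_function(spec):
    # "ppocr.modeling.backbones.rec_resnet_fpn,ResNet" -> the ResNet class
    module_path, class_name = spec.split(",")
    module = importlib.import_module(module_path)
    return getattr(module, class_name)

Backbone = resolve_function("ppocr.modeling.backbones.rec_resnet_fpn,ResNet")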
@@ -22,12 +22,12 @@ import paddle
 import paddle.fluid as fluid
 from paddle.fluid.param_attr import ParamAttr
 
 
-__all__ = ["ResNet", "ResNet18", "ResNet34", "ResNet50", "ResNet101", "ResNet152"]
+__all__ = [
+    "ResNet", "ResNet18", "ResNet34", "ResNet50", "ResNet101", "ResNet152"
+]
 
 Trainable = True
-w_nolr = fluid.ParamAttr(
-    trainable = Trainable)
+w_nolr = fluid.ParamAttr(trainable=Trainable)
 train_parameters = {
     "input_size": [3, 224, 224],
     "input_mean": [0.485, 0.456, 0.406],
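w_nolr above is a shared fluid.ParamAttr whose trainable flag comes from the module-level Trainable constant, so the layers that reuse it can be frozen by flipping one switch. A minimal illustrative sketch under Paddle 1.x static graph (the layer, its shape, and its names are assumptions, not taken from this file):

import paddle.fluid as fluid

Trainable = True
w_nolr = fluid.ParamAttr(trainable=Trainable)

# Illustrative only: a conv that reuses this attribute for both weights and bias
# would stop updating those parameters if Trainable were set to False.
x = fluid.data(name='x', shape=[None, 3, 64, 256], dtype='float32')
y = fluid.layers.conv2d(
    x, num_filters=8, filter_size=3, padding=1,
    param_attr=w_nolr, bias_attr=w_nolr)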
@@ -40,12 +40,12 @@ train_parameters = {
     }
 }
 
 
 class ResNet():
     def __init__(self, params):
         self.layers = params['layers']
         self.params = train_parameters
 
 
     def __call__(self, input):
         layers = self.layers
         supported_layers = [18, 34, 50, 101, 152]
@@ -64,7 +64,12 @@ class ResNet():
         num_filters = [64, 128, 256, 512]
 
         conv = self.conv_bn_layer(
-            input=input, num_filters=64, filter_size=7, stride=2, act='relu', name="conv1")
+            input=input,
+            num_filters=64,
+            filter_size=7,
+            stride=2,
+            act='relu',
+            name="conv1")
         F = []
         if layers >= 50:
             for block in range(len(depth)):
@@ -79,7 +84,8 @@ class ResNet():
                     conv = self.bottleneck_block(
                         input=conv,
                         num_filters=num_filters[block],
-                        stride=stride_list[block] if i == 0 else 1, name=conv_name)
+                        stride=stride_list[block] if i == 0 else 1,
+                        name=conv_name)
                 F.append(conv)
         else:
             for block in range(len(depth)):
@@ -105,17 +111,40 @@ class ResNet():
             if (w, h) == base.shape[2:]:
                 base = base
             else:
-                base = fluid.layers.conv2d_transpose( input=base, num_filters=c,filter_size=4, stride=2,
-                                                      padding=1,act=None,
+                base = fluid.layers.conv2d_transpose(
+                    input=base,
+                    num_filters=c,
+                    filter_size=4,
+                    stride=2,
+                    padding=1,
+                    act=None,
                     param_attr=w_nolr,
                     bias_attr=w_nolr)
-                base = fluid.layers.batch_norm(base, act = "relu", param_attr=w_nolr, bias_attr=w_nolr)
+                base = fluid.layers.batch_norm(
+                    base, act="relu", param_attr=w_nolr, bias_attr=w_nolr)
             base = fluid.layers.concat([base, F[i]], axis=1)
-            base = fluid.layers.conv2d(base, num_filters=c, filter_size=1, param_attr=w_nolr, bias_attr=w_nolr)
-            base = fluid.layers.conv2d(base, num_filters=c, filter_size=3,padding = 1, param_attr=w_nolr, bias_attr=w_nolr)
-            base = fluid.layers.batch_norm(base, act = "relu", param_attr=w_nolr, bias_attr=w_nolr)
+            base = fluid.layers.conv2d(
+                base,
+                num_filters=c,
+                filter_size=1,
+                param_attr=w_nolr,
+                bias_attr=w_nolr)
+            base = fluid.layers.conv2d(
+                base,
+                num_filters=c,
+                filter_size=3,
+                padding=1,
+                param_attr=w_nolr,
+                bias_attr=w_nolr)
+            base = fluid.layers.batch_norm(
+                base, act="relu", param_attr=w_nolr, bias_attr=w_nolr)
 
-        base = fluid.layers.conv2d(base, num_filters=512, filter_size=1,bias_attr=w_nolr,param_attr=w_nolr)
+        base = fluid.layers.conv2d(
+            base,
+            num_filters=512,
+            filter_size=1,
+            bias_attr=w_nolr,
+            param_attr=w_nolr)
 
         return base
@@ -136,7 +165,8 @@ class ResNet():
             padding=(filter_size - 1) // 2,
             groups=groups,
             act=None,
-            param_attr=ParamAttr(name=name + "_weights",trainable = Trainable),
+            param_attr=ParamAttr(
+                name=name + "_weights", trainable=Trainable),
             bias_attr=False,
             name=name + '.conv2d.output.1')
@@ -144,11 +174,14 @@ class ResNet():
             bn_name = "bn_" + name
         else:
             bn_name = "bn" + name[3:]
-        return fluid.layers.batch_norm(input=conv,
+        return fluid.layers.batch_norm(
+            input=conv,
             act=act,
             name=bn_name + '.output.1',
-            param_attr=ParamAttr(name=bn_name + '_scale',trainable = Trainable),
-            bias_attr=ParamAttr(bn_name + '_offset',trainable = Trainable),
+            param_attr=ParamAttr(
+                name=bn_name + '_scale', trainable=Trainable),
+            bias_attr=ParamAttr(
+                bn_name + '_offset', trainable=Trainable),
             moving_mean_name=bn_name + '_mean',
             moving_variance_name=bn_name + '_variance', )
@@ -165,7 +198,11 @@ class ResNet():
 
     def bottleneck_block(self, input, num_filters, stride, name):
         conv0 = self.conv_bn_layer(
-            input=input, num_filters=num_filters, filter_size=1, act='relu', name=name + "_branch2a")
+            input=input,
+            num_filters=num_filters,
+            filter_size=1,
+            act='relu',
+            name=name + "_branch2a")
         conv1 = self.conv_bn_layer(
             input=conv0,
             num_filters=num_filters,
@@ -174,16 +211,36 @@ class ResNet():
             act='relu',
             name=name + "_branch2b")
         conv2 = self.conv_bn_layer(
-            input=conv1, num_filters=num_filters * 4, filter_size=1, act=None, name=name + "_branch2c")
+            input=conv1,
+            num_filters=num_filters * 4,
+            filter_size=1,
+            act=None,
+            name=name + "_branch2c")
 
-        short = self.shortcut(input, num_filters * 4, stride, is_first=False, name=name + "_branch1")
+        short = self.shortcut(
+            input,
+            num_filters * 4,
+            stride,
+            is_first=False,
+            name=name + "_branch1")
 
-        return fluid.layers.elementwise_add(x=short, y=conv2, act='relu', name=name + ".add.output.5")
+        return fluid.layers.elementwise_add(
+            x=short, y=conv2, act='relu', name=name + ".add.output.5")
 
     def basic_block(self, input, num_filters, stride, is_first, name):
-        conv0 = self.conv_bn_layer(input=input, num_filters=num_filters, filter_size=3, act='relu', stride=stride,
+        conv0 = self.conv_bn_layer(
+            input=input,
+            num_filters=num_filters,
+            filter_size=3,
+            act='relu',
+            stride=stride,
             name=name + "_branch2a")
-        conv1 = self.conv_bn_layer(input=conv0, num_filters=num_filters, filter_size=3, act=None,
+        conv1 = self.conv_bn_layer(
+            input=conv0,
+            num_filters=num_filters,
+            filter_size=3,
+            act=None,
            name=name + "_branch2b")
-        short = self.shortcut(input, num_filters, stride, is_first, name=name + "_branch1")
+        short = self.shortcut(
+            input, num_filters, stride, is_first, name=name + "_branch1")
         return fluid.layers.elementwise_add(x=short, y=conv1, act='relu')
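After the rename, the backbone is imported from rec_resnet_fpn. A minimal usage sketch under Paddle 1.x static graph (the input shape is illustrative, not prescribed by this commit):

import paddle.fluid as fluid
from ppocr.modeling.backbones.rec_resnet_fpn import ResNet  # renamed module path

image = fluid.data(name='image', shape=[None, 3, 64, 256], dtype='float32')
backbone = ResNet(params={'layers': 50})  # matches "layers: 50" in the config above
feature = backbone(image)  # FPN-merged feature map, 512 channels after the final 1x1 conv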