update for wavenet and clarinet
parent ff1d66ea94
commit e0ba85f6b5
@@ -40,7 +40,7 @@ sudo apt-get install libsndfile1
 
 ### Install PaddlePaddle
 
-See [install](https://www.paddlepaddle.org.cn/install/quick) for more details. This repo requires PaddlePaddle **1.7.1** or above.
+See [install](https://www.paddlepaddle.org.cn/install/quick) for more details. This repo requires PaddlePaddle **1.8.0** or above.
 
 ### Install Parakeet
 
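A quick way to confirm the new minimum at runtime is a version guard; this is a sketch assuming the `fluid.require_version` helper is available in the installed build:

```python
import paddle.fluid as fluid

# Guard for the new minimum framework version; assumes the 1.x
# fluid.require_version helper is present in the installed build.
fluid.require_version("1.8.0")
```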
@@ -163,11 +163,11 @@ if __name__ == "__main__":
     anneal_interval = train_config["anneal_interval"]
     lr_scheduler = dg.ExponentialDecay(
         learning_rate, anneal_interval, anneal_rate, staircase=True)
-    optim = fluid.optimizer.Adam(
-        lr_scheduler, parameter_list=model.parameters())
     gradiant_max_norm = train_config["gradient_max_norm"]
-    clipper = fluid.dygraph_grad_clip.GradClipByGlobalNorm(
-        gradiant_max_norm)
+    optim = fluid.optimizer.Adam(
+        lr_scheduler,
+        parameter_list=model.parameters(),
+        grad_clip=fluid.clip.ClipByGlobalNorm(gradiant_max_norm))
 
     # train
     max_iterations = train_config["max_iterations"]
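The hunk above tracks the Paddle 1.8 clipping API: rather than constructing a standalone clipper and threading it through `minimize`, the clip object is attached to the optimizer at construction time. A minimal sketch of the new pattern, with a stand-in layer and an illustrative clip norm:

```python
import paddle.fluid as fluid
import paddle.fluid.dygraph as dg

with dg.guard():
    model = dg.Linear(10, 10)  # stand-in for the real WaveNet model
    # The clip strategy now rides on the optimizer itself; minimize()
    # applies it before the parameter update. ClipByGlobalNorm is the
    # class name as used in the diff above.
    optim = fluid.optimizer.Adam(
        learning_rate=1e-3,
        parameter_list=model.parameters(),
        grad_clip=fluid.clip.ClipByGlobalNorm(100.0))
```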
@@ -229,7 +229,7 @@ if __name__ == "__main__":
                     step_loss))
 
             l.backward()
-            optim.minimize(l, grad_clip=clipper)
+            optim.minimize(l)
             optim.clear_gradients()
 
             if global_step % eval_interval == 0:
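With the clipper argument removed from `minimize`, a dygraph training step reduces to the usual backward/minimize/clear cycle; clipping now happens inside `minimize` via the optimizer's `grad_clip`. Schematically (`model` and `train_loader` are placeholders for the objects built earlier in the script):

```python
# Schematic dygraph training step; names are placeholders.
for global_step, batch in enumerate(train_loader(), start=1):
    l = model(*batch)        # forward pass producing a scalar loss
    l.backward()             # accumulate gradients
    optim.minimize(l)        # clip by global norm, then Adam update
    optim.clear_gradients()  # reset gradients for the next step
```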
@@ -126,12 +126,11 @@ if __name__ == "__main__":
     anneal_interval = train_config["anneal_interval"]
     lr_scheduler = dg.ExponentialDecay(
         learning_rate, anneal_interval, anneal_rate, staircase=True)
-    optim = fluid.optimizer.Adam(
-        lr_scheduler, parameter_list=model.parameters())
-
     gradiant_max_norm = train_config["gradient_max_norm"]
-    clipper = fluid.dygraph_grad_clip.GradClipByGlobalNorm(
-        gradiant_max_norm)
+    optim = fluid.optimizer.Adam(
+        lr_scheduler,
+        parameter_list=model.parameters(),
+        grad_clip=fluid.clip.ClipByGlobalNorm(gradiant_max_norm))
 
     train_loader = fluid.io.DataLoader.from_generator(
         capacity=10, return_list=True)
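For context, the loader created above still needs a batch source attached before it can be iterated; a sketch with a made-up generator (the real one yields preprocessed audio batches):

```python
import numpy as np
import paddle.fluid as fluid

train_loader = fluid.io.DataLoader.from_generator(
    capacity=10, return_list=True)

def dummy_batches():
    # placeholder generator standing in for the real dataset pipeline
    for _ in range(8):
        yield [np.random.randn(4, 1, 16000).astype("float32")]

train_loader.set_batch_generator(dummy_batches, places=fluid.CPUPlace())
```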
@@ -149,7 +148,7 @@ if __name__ == "__main__":
     log_dir = os.path.join(args.output, "log")
     writer = SummaryWriter(log_dir)
 
-    # load parameters and optimizer, and opdate iterations done sofar
+    # load parameters and optimizer, and update iterations done so far
     if args.checkpoint is not None:
         iteration = io.load_parameters(
             model, optim, checkpoint_path=args.checkpoint)
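Built on the call above, a resume path might look like the following sketch; the fall-back to iteration zero is an assumed convention, not part of this diff:

```python
# Resume from a checkpoint when one is given, else start fresh.
# io.load_parameters is Parakeet's helper as used in the diff above.
if args.checkpoint is not None:
    iteration = io.load_parameters(
        model, optim, checkpoint_path=args.checkpoint)
else:
    iteration = 0  # assumed default when training from scratch
global_step = iteration + 1
```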
@@ -181,7 +180,7 @@ if __name__ == "__main__":
             writer.add_scalar("learning_rate",
                               optim._learning_rate.step().numpy()[0],
                               global_step)
-            optim.minimize(loss_var, grad_clip=clipper)
+            optim.minimize(loss_var)
             optim.clear_gradients()
             print("global_step: {}\tloss: {:<8.6f}".format(global_step,
                                                            loss_np[0]))
@@ -29,22 +29,10 @@ def conv2d(input,
            data_format="NCHW"):
     padding = tuple(pad for pad_dim in padding for pad in pad_dim)
 
-    inputs = {
-        'Input': [input],
-        'Filter': [weight],
-    }
-    attrs = {
-        'strides': stride,
-        'paddings': padding,
-        'dilations': dilation,
-        'groups': groups,
-        'use_cudnn': use_cudnn,
-        'use_mkldnn': False,
-        'fuse_relu_before_depthwise_conv': False,
-        "padding_algorithm": "EXPLICIT",
-        "data_format": data_format,
-    }
+    attrs = ('strides', stride, 'paddings', padding, 'dilations', dilation,
+             'groups', groups, 'use_cudnn', use_cudnn, 'use_mkldnn', False,
+             'fuse_relu_before_depthwise_conv', False, "padding_algorithm",
+             "EXPLICIT", "data_format", data_format)
 
-    outputs = ops.conv2d(inputs, attrs)
-    out = outputs["Output"][0]
-    return out
+    out = ops.conv2d(input, weight, *attrs)
+    return out
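The rewritten wrapper targets the dygraph C++ op entry point, which takes inputs positionally and attributes as one flat name/value sequence instead of the old `inputs`/`attrs` dicts; note that the nested per-dimension padding pairs are flattened first. An illustrative call of this wrapper (parameter names inferred from the attrs in the body; values made up):

```python
# Illustrative use of the conv2d wrapper above; a ((1, 1), (2, 2))
# padding is flattened to (1, 1, 2, 2) before reaching the raw op.
out = conv2d(x, w,
             stride=(1, 1),
             padding=((1, 1), (2, 2)),
             dilation=(1, 1),
             groups=1,
             use_cudnn=True,
             data_format="NCHW")
```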
@@ -111,7 +111,7 @@ class ResidualBlock(dg.Layer):
         h = h[:, :, :time_steps]
 
         # condition
-        if condition:
+        if condition is not None:
            h += self.condition_proj(condition)
 
        # gated tanh
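The `is not None` form matters here: truth-testing a framework tensor is unreliable, and even with plain values a present-but-falsy condition would wrongly skip the projection, when only absence should disable conditioning. A plain-Python illustration:

```python
# Presence check vs truthiness, with ordinary floats for clarity.
def apply_condition(h, condition=None):
    if condition is not None:  # only a missing condition disables it
        h = h + condition
    return h

print(apply_condition(1.0, 0.0))  # 1.0 -- zero condition is still applied
print(apply_condition(1.0))      # 1.0 -- no condition supplied
```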
@@ -398,7 +398,8 @@ class WaveNet(dg.Layer):
 
         x_std = inv_std * (t - mu)
         exponent = F.exp(-0.5 * x_std * x_std)
-        pdf_x = 1.0 / np.sqrt(2.0 * np.pi) * inv_std * exponent
+        pdf_x = 1.0 / math.sqrt(2.0 * math.pi) * inv_std * exponent
+
         pdf_x = p_mixture * pdf_x
         # pdf_x: [bs, len]
         pdf_x = F.reduce_sum(pdf_x, dim=-1)
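The expression above evaluates the Gaussian density N(t; mu, sigma) = exp(-(t - mu)^2 / (2 * sigma^2)) / (sigma * sqrt(2 * pi)), weighted by the mixture probabilities and summed over components. The switch from `np.sqrt` to `math.sqrt` keeps the constant a plain Python float rather than a `numpy.float64` scalar, which is presumably the motivation; a quick check of the constant:

```python
import math

# 1/sqrt(2*pi): the normalizing constant of the Gaussian density.
# math.sqrt returns a native Python float, unlike np.sqrt's numpy.float64.
INV_SQRT_2PI = 1.0 / math.sqrt(2.0 * math.pi)
print(round(INV_SQRT_2PI, 6))  # 0.398942
```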