# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import numpy as np
import paddle
from paddle import nn
from paddle.nn import functional as F
import parakeet
from parakeet.modules.conv import Conv1dBatchNorm
from parakeet.modules.attention import LocationSensitiveAttention
from parakeet.modules import masking
from parakeet.utils import checkpoint

__all__ = ["Tacotron2", "Tacotron2Loss"]


class DecoderPreNet(nn.Layer):
    """Decoder prenet module for Tacotron2.

    Parameters
    ----------
    d_input: int
        The input feature size.

    d_hidden: int
        The hidden size.

    d_output: int
        The output feature size.

    dropout_rate: float
        The dropout probability.
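
    Examples
    --------
    A minimal usage sketch; the sizes below are illustrative assumptions:

    >>> prenet = DecoderPreNet(d_input=80, d_hidden=256, d_output=256,
    ...                        dropout_rate=0.5)
    >>> out = prenet(paddle.randn([4, 10, 80]))  # -> shape (4, 10, 256)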

    """

    def __init__(self,
                 d_input: int,
                 d_hidden: int,
                 d_output: int,
                 dropout_rate: float):
        super().__init__()

        self.dropout_rate = dropout_rate
        self.linear1 = nn.Linear(d_input, d_hidden, bias_attr=False)
        self.linear2 = nn.Linear(d_hidden, d_output, bias_attr=False)

    def forward(self, x):
        """Calculate forward propagation.

        Parameters
        ----------
        x: Tensor [shape=(B, T_mel, C)]
            Batch of the sequences of padded mel spectrogram.

        Returns
        -------
        output: Tensor [shape=(B, T_mel, C)]
            Batch of the sequences of padded hidden states.

        """
        # Dropout is applied with training=True even at inference time;
        # this is the decoder-prenet dropout from the Tacotron2 paper.
        x = F.dropout(
            F.relu(self.linear1(x)), self.dropout_rate, training=True)
        output = F.dropout(
            F.relu(self.linear2(x)), self.dropout_rate, training=True)
        return output


class DecoderPostNet(nn.Layer):
    """Decoder postnet module for Tacotron2.

    Parameters
    ----------
    d_mels: int
        The number of mel bands.

    d_hidden: int
        The hidden size of postnet.

    kernel_size: int
        The kernel size of the conv layers in postnet.

    num_layers: int
        The number of conv layers in postnet.

    dropout: float
        The dropout probability.
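
    Examples
    --------
    A minimal usage sketch; the sizes below are illustrative assumptions:

    >>> postnet = DecoderPostNet(
    ...     d_mels=80, d_hidden=512, kernel_size=5, num_layers=5, dropout=0.5)
    >>> residual = postnet(paddle.randn([4, 10, 80]))  # -> shape (4, 10, 80)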

    """

    def __init__(self,
                 d_mels: int,
                 d_hidden: int,
                 kernel_size: int,
                 num_layers: int,
                 dropout: float):
        super().__init__()
        self.dropout = dropout
        self.num_layers = num_layers

        padding = int((kernel_size - 1) / 2)

        self.conv_batchnorms = nn.LayerList()
        k = math.sqrt(1.0 / (d_mels * kernel_size))
        self.conv_batchnorms.append(
            Conv1dBatchNorm(
                d_mels,
                d_hidden,
                kernel_size=kernel_size,
                padding=padding,
                bias_attr=paddle.ParamAttr(initializer=nn.initializer.Uniform(
                    low=-k, high=k)),
                data_format='NLC'))

        k = math.sqrt(1.0 / (d_hidden * kernel_size))
        self.conv_batchnorms.extend([
            Conv1dBatchNorm(
                d_hidden,
                d_hidden,
                kernel_size=kernel_size,
                padding=padding,
                bias_attr=paddle.ParamAttr(initializer=nn.initializer.Uniform(
                    low=-k, high=k)),
                data_format='NLC') for i in range(1, num_layers - 1)
        ])

        self.conv_batchnorms.append(
            Conv1dBatchNorm(
                d_hidden,
                d_mels,
                kernel_size=kernel_size,
                padding=padding,
                bias_attr=paddle.ParamAttr(initializer=nn.initializer.Uniform(
                    low=-k, high=k)),
                data_format='NLC'))

    def forward(self, input):
        """Calculate forward propagation.

        Parameters
        ----------
        input: Tensor [shape=(B, T_mel, C)]
            Output sequence of features from decoder.

        Returns
        -------
        output: Tensor [shape=(B, T_mel, C)]
            Output sequence of features after postnet.

        """
        for i in range(len(self.conv_batchnorms) - 1):
            input = F.dropout(
                F.tanh(self.conv_batchnorms[i](input)),
                self.dropout,
                training=self.training)
        # The final conv layer is linear (no tanh).
        output = F.dropout(
            self.conv_batchnorms[self.num_layers - 1](input),
            self.dropout,
            training=self.training)
        return output


class Tacotron2Encoder(nn.Layer):
    """Tacotron2 encoder module for Tacotron2.

    Parameters
    ----------
    d_hidden: int
        The hidden size in encoder module.

    conv_layers: int
        The number of conv layers.

    kernel_size: int
        The kernel size of conv layers.

    p_dropout: float
        The dropout probability.
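
    Examples
    --------
    A minimal usage sketch over already-embedded inputs; sizes are
    illustrative assumptions:

    >>> encoder = Tacotron2Encoder(
    ...     d_hidden=512, conv_layers=3, kernel_size=5, p_dropout=0.5)
    >>> out = encoder(paddle.randn([4, 20, 512]))  # -> shape (4, 20, 512)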

    """

    def __init__(self,
                 d_hidden: int,
                 conv_layers: int,
                 kernel_size: int,
                 p_dropout: float):
        super().__init__()

        k = math.sqrt(1.0 / (d_hidden * kernel_size))
        self.conv_batchnorms = paddle.nn.LayerList([
            Conv1dBatchNorm(
                d_hidden,
                d_hidden,
                kernel_size,
                stride=1,
                padding=int((kernel_size - 1) / 2),
                bias_attr=paddle.ParamAttr(initializer=nn.initializer.Uniform(
                    low=-k, high=k)),
                data_format='NLC') for i in range(conv_layers)
        ])
        self.p_dropout = p_dropout

        self.hidden_size = int(d_hidden / 2)
        self.lstm = nn.LSTM(
            d_hidden, self.hidden_size, direction="bidirectional")

    def forward(self, x, input_lens=None):
        """Calculate forward propagation of tacotron2 encoder.

        Parameters
        ----------
        x: Tensor [shape=(B, T, C)]
            Batch of the sequences of padded character embeddings.

        input_lens: Tensor [shape=(B,)], optional
            Batch of lengths of each input sequence. Defaults to None.

        Returns
        -------
        output : Tensor [shape=(B, T, C)]
            Batch of the sequences of padded hidden states.

        """
        for conv_batchnorm in self.conv_batchnorms:
            x = F.dropout(
                F.relu(conv_batchnorm(x)),
                self.p_dropout,
                training=self.training)

        output, _ = self.lstm(inputs=x, sequence_length=input_lens)
        return output


class Tacotron2Decoder(nn.Layer):
    """Tacotron2 decoder module for Tacotron2.

    Parameters
    ----------
    d_mels: int
        The number of mel bands.

    reduction_factor: int
        The reduction factor of tacotron.

    d_encoder: int
        The hidden size of encoder.

    d_prenet: int
        The hidden size in decoder prenet.

    d_attention_rnn: int
        The attention rnn layer hidden size.

    d_decoder_rnn: int
        The decoder rnn layer hidden size.

    d_attention: int
        The hidden size of the linear layer in location sensitive attention.

    attention_filters: int
        The filter size of the conv layer in location sensitive attention.

    attention_kernel_size: int
        The kernel size of the conv layer in location sensitive attention.

    p_prenet_dropout: float
        The dropout probability in decoder prenet.

    p_attention_dropout: float
        The dropout probability in location sensitive attention.

    p_decoder_dropout: float
        The dropout probability in decoder.
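
    Examples
    --------
    A minimal teacher-forcing sketch; sizes are illustrative assumptions and
    ``mask=None`` disables attention masking:

    >>> decoder = Tacotron2Decoder(
    ...     d_mels=80, reduction_factor=1, d_encoder=512, d_prenet=256,
    ...     d_attention_rnn=1024, d_decoder_rnn=1024, d_attention=128,
    ...     attention_filters=32, attention_kernel_size=31,
    ...     p_prenet_dropout=0.5, p_attention_dropout=0.1,
    ...     p_decoder_dropout=0.1)
    >>> keys = paddle.randn([4, 20, 512])  # encoder outputs
    >>> mels = paddle.randn([4, 50, 80])   # target mel frames
    >>> mel_outputs, stop_logits, alignments = decoder(keys, mels, mask=None)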

    """

    def __init__(self,
                 d_mels: int,
                 reduction_factor: int,
                 d_encoder: int,
                 d_prenet: int,
                 d_attention_rnn: int,
                 d_decoder_rnn: int,
                 d_attention: int,
                 attention_filters: int,
                 attention_kernel_size: int,
                 p_prenet_dropout: float,
                 p_attention_dropout: float,
                 p_decoder_dropout: float):
        super().__init__()
        self.d_mels = d_mels
        self.reduction_factor = reduction_factor
        self.d_encoder = d_encoder
        self.d_attention_rnn = d_attention_rnn
        self.d_decoder_rnn = d_decoder_rnn
        self.p_attention_dropout = p_attention_dropout
        self.p_decoder_dropout = p_decoder_dropout

        self.prenet = DecoderPreNet(
            d_mels * reduction_factor,
            d_prenet,
            d_prenet,
            dropout_rate=p_prenet_dropout)

        self.attention_rnn = nn.LSTMCell(d_prenet + d_encoder, d_attention_rnn)

        self.attention_layer = LocationSensitiveAttention(
            d_attention_rnn, d_encoder, d_attention, attention_filters,
            attention_kernel_size)
        self.decoder_rnn = nn.LSTMCell(d_attention_rnn + d_encoder,
                                       d_decoder_rnn)
        self.linear_projection = nn.Linear(d_decoder_rnn + d_encoder,
                                           d_mels * reduction_factor)
        self.stop_layer = nn.Linear(d_decoder_rnn + d_encoder, 1)

    def _initialize_decoder_states(self, key):
        """Initialize the states to be used in the decoder."""
        batch_size = key.shape[0]
        MAX_TIME = key.shape[1]

        self.attention_hidden = paddle.zeros(
            shape=[batch_size, self.d_attention_rnn], dtype=key.dtype)
        self.attention_cell = paddle.zeros(
            shape=[batch_size, self.d_attention_rnn], dtype=key.dtype)

        self.decoder_hidden = paddle.zeros(
            shape=[batch_size, self.d_decoder_rnn], dtype=key.dtype)
        self.decoder_cell = paddle.zeros(
            shape=[batch_size, self.d_decoder_rnn], dtype=key.dtype)

        self.attention_weights = paddle.zeros(
            shape=[batch_size, MAX_TIME], dtype=key.dtype)
        self.attention_weights_cum = paddle.zeros(
            shape=[batch_size, MAX_TIME], dtype=key.dtype)
        self.attention_context = paddle.zeros(
            shape=[batch_size, self.d_encoder], dtype=key.dtype)

        self.key = key  # [B, T, C]
        self.processed_key = self.attention_layer.key_layer(key)  # [B, T, C]

    def _decode(self, query):
        """Decode one time step."""
        cell_input = paddle.concat([query, self.attention_context], axis=-1)

        # The first lstm layer
        _, (self.attention_hidden, self.attention_cell) = self.attention_rnn(
            cell_input, (self.attention_hidden, self.attention_cell))
        self.attention_hidden = F.dropout(
            self.attention_hidden,
            self.p_attention_dropout,
            training=self.training)

        # Location sensitive attention
        attention_weights_cat = paddle.stack(
            [self.attention_weights, self.attention_weights_cum], axis=-1)
        self.attention_context, self.attention_weights = self.attention_layer(
            self.attention_hidden, self.processed_key, self.key,
            attention_weights_cat, self.mask)
        self.attention_weights_cum += self.attention_weights

        # The second lstm layer
        decoder_input = paddle.concat(
            [self.attention_hidden, self.attention_context], axis=-1)
        _, (self.decoder_hidden, self.decoder_cell) = self.decoder_rnn(
            decoder_input, (self.decoder_hidden, self.decoder_cell))
        self.decoder_hidden = F.dropout(
            self.decoder_hidden,
            p=self.p_decoder_dropout,
            training=self.training)

        # decode output one step
        decoder_hidden_attention_context = paddle.concat(
            [self.decoder_hidden, self.attention_context], axis=-1)
        decoder_output = self.linear_projection(
            decoder_hidden_attention_context)
        stop_logit = self.stop_layer(decoder_hidden_attention_context)
        return decoder_output, stop_logit, self.attention_weights

    def forward(self, keys, querys, mask):
        """Calculate forward propagation of tacotron2 decoder.

        Parameters
        ----------
        keys: Tensor [shape=(B, T_key, C)]
            Batch of the sequences of padded output from encoder.

        querys: Tensor [shape=(B, T_query, C)]
            Batch of the sequences of padded mel spectrogram.

        mask: Tensor
            Mask generated with text length. Shape should be (B, T_key, T_query) or broadcastable shape.

        Returns
        -------
        mel_output: Tensor [shape=(B, T_query, C)]
            Output sequence of features.

        stop_logits: Tensor [shape=(B, T_query)]
            Output sequence of stop logits.

        alignments: Tensor [shape=(B, T_query, T_key)]
            Attention weights.

        """
        querys = paddle.reshape(
            querys,
            [querys.shape[0], querys.shape[1] // self.reduction_factor, -1])
        # Prepend an all-zero <GO> frame to the reduction-grouped queries.
        querys = paddle.concat(
            [
                paddle.zeros(
                    shape=[querys.shape[0], 1, querys.shape[-1]],
                    dtype=querys.dtype), querys
            ],
            axis=1)
        querys = self.prenet(querys)

        self._initialize_decoder_states(keys)
        self.mask = mask

        mel_outputs, stop_logits, alignments = [], [], []
        # Ignore the last time step.
        while len(mel_outputs) < querys.shape[1] - 1:
            query = querys[:, len(mel_outputs), :]
            mel_output, stop_logit, attention_weights = self._decode(query)
            mel_outputs += [mel_output]
            stop_logits += [stop_logit]
            alignments += [attention_weights]

        alignments = paddle.stack(alignments, axis=1)
        stop_logits = paddle.concat(stop_logits, axis=1)
        mel_outputs = paddle.stack(mel_outputs, axis=1)

        return mel_outputs, stop_logits, alignments

    def infer(self, key, stop_threshold=0.5, max_decoder_steps=1000):
        """Calculate forward propagation of tacotron2 decoder for inference.

        Parameters
        ----------
        key: Tensor [shape=(B, T_key, C)]
            Batch of the sequences of padded output from encoder.

        stop_threshold: float, optional
            Stop decoding once the sigmoid of the stop logit is greater than
            this threshold. Defaults to 0.5.

        max_decoder_steps: int, optional
            Maximum number of decoder steps for synthesis. Defaults to 1000.

        Returns
        -------
        mel_output: Tensor [shape=(B, T_mel, C)]
            Output sequence of features.

        stop_logits: Tensor [shape=(B, T_mel)]
            Output sequence of stop logits.

        alignments: Tensor [shape=(B, T_mel, T_key)]
            Attention weights.

        """
        # Start decoding from an all-zero <GO> frame.
        query = paddle.zeros(
            shape=[key.shape[0], self.d_mels * self.reduction_factor],
            dtype=key.dtype)  # [B, C]

        self._initialize_decoder_states(key)
        self.mask = None

        mel_outputs, stop_logits, alignments = [], [], []
        while True:
            query = self.prenet(query)
            mel_output, stop_logit, alignment = self._decode(query)

            mel_outputs += [mel_output]
            stop_logits += [stop_logit]
            alignments += [alignment]

            if F.sigmoid(stop_logit) > stop_threshold:
                break
            elif len(mel_outputs) == max_decoder_steps:
                print("Warning! Reached max decoder steps!!!")
                break

            # Feed the last prediction back as the next query.
            query = mel_output

        alignments = paddle.stack(alignments, axis=1)
        stop_logits = paddle.concat(stop_logits, axis=1)
        mel_outputs = paddle.stack(mel_outputs, axis=1)

        return mel_outputs, stop_logits, alignments


class Tacotron2(nn.Layer):
    """Tacotron2 model for end-to-end text-to-speech (E2E-TTS).

    This is the spectrogram prediction network of Tacotron2 described in
    `Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions
    <https://arxiv.org/abs/1712.05884>`_,
    which converts a sequence of characters
    into a sequence of mel spectrogram frames.

    Parameters
    ----------
    frontend : parakeet.frontend.Phonetics
        Frontend used to preprocess text.

    d_mels: int
        Number of mel bands.

    d_encoder: int
        Hidden size in encoder module.

    encoder_conv_layers: int
        Number of conv layers in encoder.

    encoder_kernel_size: int
        Kernel size of conv layers in encoder.

    d_prenet: int
        Hidden size in decoder prenet.

    d_attention_rnn: int
        Attention rnn layer hidden size in decoder.

    d_decoder_rnn: int
        Decoder rnn layer hidden size in decoder.

    attention_filters: int
        Filter size of the conv layer in location sensitive attention.

    attention_kernel_size: int
        Kernel size of the conv layer in location sensitive attention.

    d_attention: int
        Hidden size of the linear layer in location sensitive attention.

    d_postnet: int
        Hidden size of postnet.

    postnet_kernel_size: int
        Kernel size of the conv layer in postnet.

    postnet_conv_layers: int
        Number of conv layers in postnet.

    reduction_factor: int
        Reduction factor of tacotron2.

    p_encoder_dropout: float
        Dropout probability in encoder.

    p_prenet_dropout: float
        Dropout probability in decoder prenet.

    p_attention_dropout: float
        Dropout probability in location sensitive attention.

    p_decoder_dropout: float
        Dropout probability in decoder.

    p_postnet_dropout: float
        Dropout probability in postnet.
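
    Examples
    --------
    A minimal usage sketch with randomly initialized weights; ``English`` is
    assumed to be an available text frontend in ``parakeet.frontend``:

    >>> from parakeet.frontend import English
    >>> model = Tacotron2(English())
    >>> model.eval()
    >>> mel, alignments = model.predict("Hello, world.")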

    """

    def __init__(self,
                 frontend: parakeet.frontend.Phonetics,
                 d_mels: int=80,
                 d_encoder: int=512,
                 encoder_conv_layers: int=3,
                 encoder_kernel_size: int=5,
                 d_prenet: int=256,
                 d_attention_rnn: int=1024,
                 d_decoder_rnn: int=1024,
                 attention_filters: int=32,
                 attention_kernel_size: int=31,
                 d_attention: int=128,
                 d_postnet: int=512,
                 postnet_kernel_size: int=5,
                 postnet_conv_layers: int=5,
                 reduction_factor: int=1,
                 p_encoder_dropout: float=0.5,
                 p_prenet_dropout: float=0.5,
                 p_attention_dropout: float=0.1,
                 p_decoder_dropout: float=0.1,
                 p_postnet_dropout: float=0.5):
        super().__init__()

        self.frontend = frontend
        std = math.sqrt(2.0 / (self.frontend.vocab_size + d_encoder))
        val = math.sqrt(3.0) * std  # uniform bounds for std
        self.embedding = nn.Embedding(
            self.frontend.vocab_size,
            d_encoder,
            weight_attr=paddle.ParamAttr(initializer=nn.initializer.Uniform(
                low=-val, high=val)))
        self.encoder = Tacotron2Encoder(d_encoder, encoder_conv_layers,
                                        encoder_kernel_size, p_encoder_dropout)
        self.decoder = Tacotron2Decoder(
            d_mels, reduction_factor, d_encoder, d_prenet, d_attention_rnn,
            d_decoder_rnn, d_attention, attention_filters,
            attention_kernel_size, p_prenet_dropout, p_attention_dropout,
            p_decoder_dropout)
        self.postnet = DecoderPostNet(
            d_mels=d_mels * reduction_factor,
            d_hidden=d_postnet,
            kernel_size=postnet_kernel_size,
            num_layers=postnet_conv_layers,
            dropout=p_postnet_dropout)

    def forward(self, text_inputs, mels, text_lens, output_lens=None):
        """Calculate forward propagation of tacotron2.

        Parameters
        ----------
        text_inputs: Tensor [shape=(B, T_text)]
            Batch of the sequences of padded character ids.

        mels: Tensor [shape=(B, T_mel, C)]
            Batch of the sequences of padded mel spectrogram.

        text_lens: Tensor [shape=(B,)]
            Batch of lengths of each text input batch.

        output_lens: Tensor [shape=(B,)], optional
            Batch of lengths of each mels batch. Defaults to None.

        Returns
        -------
        outputs : Dict[str, Tensor]

            mel_output: output sequence of features (B, T_mel, C);

            mel_outputs_postnet: output sequence of features after postnet (B, T_mel, C);

            stop_logits: output sequence of stop logits (B, T_mel);

            alignments: attention weights (B, T_mel, T_text).
        """
        embedded_inputs = self.embedding(text_inputs)
        encoder_outputs = self.encoder(embedded_inputs, text_lens)

        mask = paddle.tensor.unsqueeze(
            paddle.fluid.layers.sequence_mask(
                x=text_lens, dtype=encoder_outputs.dtype), [-1])
        mel_outputs, stop_logits, alignments = self.decoder(
            encoder_outputs, mels, mask=mask)

        mel_outputs_postnet = self.postnet(mel_outputs)
        mel_outputs_postnet = mel_outputs + mel_outputs_postnet

        if output_lens is not None:
            mask = paddle.tensor.unsqueeze(
                paddle.fluid.layers.sequence_mask(x=output_lens),
                [-1])  # [B, T, 1]
            mel_outputs = mel_outputs * mask  # [B, T, C]
            mel_outputs_postnet = mel_outputs_postnet * mask  # [B, T, C]
            # Padded positions get a large positive logit so they are
            # confidently predicted as "stop".
            stop_logits = stop_logits * mask[:, :, 0] + (1 - mask[:, :, 0]
                                                         ) * 1e3  # [B, T]
        outputs = {
            "mel_output": mel_outputs,
            "mel_outputs_postnet": mel_outputs_postnet,
            "stop_logits": stop_logits,
            "alignments": alignments
        }

        return outputs

    @paddle.no_grad()
    def infer(self, text_inputs, stop_threshold=0.5, max_decoder_steps=1000):
        """Generate the mel spectrogram given the sequences of character ids.

        Parameters
        ----------
        text_inputs: Tensor [shape=(B, T_text)]
            Batch of the sequences of padded character ids.

        stop_threshold: float, optional
            Stop decoding once the sigmoid of the stop logit is greater than
            this threshold. Defaults to 0.5.

        max_decoder_steps: int, optional
            Maximum number of decoder steps for synthesis. Defaults to 1000.

        Returns
        -------
        outputs : Dict[str, Tensor]

            mel_output: output sequence of spectrogram (B, T_mel, C);

            mel_outputs_postnet: output sequence of spectrogram after postnet (B, T_mel, C);

            stop_logits: output sequence of stop logits (B, T_mel);

            alignments: attention weights (B, T_mel, T_text).
        """
        embedded_inputs = self.embedding(text_inputs)
        encoder_outputs = self.encoder(embedded_inputs)
        mel_outputs, stop_logits, alignments = self.decoder.infer(
            encoder_outputs,
            stop_threshold=stop_threshold,
            max_decoder_steps=max_decoder_steps)

        mel_outputs_postnet = self.postnet(mel_outputs)
        mel_outputs_postnet = mel_outputs + mel_outputs_postnet

        outputs = {
            "mel_output": mel_outputs,
            "mel_outputs_postnet": mel_outputs_postnet,
            "stop_logits": stop_logits,
            "alignments": alignments
        }

        return outputs

    @paddle.no_grad()
    def predict(self, text, stop_threshold=0.5, max_decoder_steps=1000):
        """Generate the mel spectrogram given a sequence of characters.

        Parameters
        ----------
        text: str
            Sequence of characters.

        stop_threshold: float, optional
            Stop decoding once the sigmoid of the stop logit is greater than
            this threshold. Defaults to 0.5.

        max_decoder_steps: int, optional
            Maximum number of decoder steps for synthesis. Defaults to 1000.

        Returns
        -------
        mel_outputs_postnet: np.ndarray [shape=(T_mel, C)]
            Output sequence of spectrogram after postnet.

        alignments: np.ndarray [shape=(T_mel, T_text)]
            Attention weights.
        """
        ids = np.asarray(self.frontend(text))
        ids = paddle.unsqueeze(paddle.to_tensor(ids, dtype='int64'), [0])
        outputs = self.infer(ids, stop_threshold, max_decoder_steps)
        return outputs['mel_outputs_postnet'][0].numpy(), outputs[
            'alignments'][0].numpy()

    @classmethod
    def from_pretrained(cls, frontend, config, checkpoint_path):
        """Build a tacotron2 model from a pretrained model.

        Parameters
        ----------
        frontend: parakeet.frontend.Phonetics
            Frontend used to preprocess text.

        config: yacs.config.CfgNode
            Model configs.

        checkpoint_path: Path or str
            The path of the pretrained model checkpoint, without extension name.

        Returns
        -------
        Tacotron2
            The model built from the pretrained checkpoint.
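
        Examples
        --------
        A minimal sketch; ``config`` is a loaded ``yacs`` config node,
        ``English`` is assumed to be an available frontend, and the
        checkpoint path is hypothetical:

        >>> from parakeet.frontend import English
        >>> model = Tacotron2.from_pretrained(
        ...     English(), config, "exp/checkpoints/step-100000")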
        """
        model = cls(frontend,
                    d_mels=config.data.d_mels,
                    d_encoder=config.model.d_encoder,
                    encoder_conv_layers=config.model.encoder_conv_layers,
                    encoder_kernel_size=config.model.encoder_kernel_size,
                    d_prenet=config.model.d_prenet,
                    d_attention_rnn=config.model.d_attention_rnn,
                    d_decoder_rnn=config.model.d_decoder_rnn,
                    attention_filters=config.model.attention_filters,
                    attention_kernel_size=config.model.attention_kernel_size,
                    d_attention=config.model.d_attention,
                    d_postnet=config.model.d_postnet,
                    postnet_kernel_size=config.model.postnet_kernel_size,
                    postnet_conv_layers=config.model.postnet_conv_layers,
                    reduction_factor=config.model.reduction_factor,
                    p_encoder_dropout=config.model.p_encoder_dropout,
                    p_prenet_dropout=config.model.p_prenet_dropout,
                    p_attention_dropout=config.model.p_attention_dropout,
                    p_decoder_dropout=config.model.p_decoder_dropout,
                    p_postnet_dropout=config.model.p_postnet_dropout)

        checkpoint.load_parameters(model, checkpoint_path=checkpoint_path)
        return model


class Tacotron2Loss(nn.Layer):
    """Tacotron2 loss module.
    """

    def __init__(self):
        super().__init__()

    def forward(self, mel_outputs, mel_outputs_postnet, stop_logits,
                mel_targets, stop_tokens):
        """Calculate tacotron2 loss.

        Parameters
        ----------
        mel_outputs: Tensor [shape=(B, T_mel, C)]
            Output mel spectrogram sequence.

        mel_outputs_postnet: Tensor [shape=(B, T_mel, C)]
            Output mel spectrogram sequence after postnet.

        stop_logits: Tensor [shape=(B, T_mel)]
            Output sequence of stop logits before sigmoid.

        mel_targets: Tensor [shape=(B, T_mel, C)]
            Target mel spectrogram sequence.

        stop_tokens: Tensor [shape=(B, T_mel)]
            Target stop tokens.

        Returns
        -------
        losses : Dict[str, Tensor]

            loss: the sum of the other three losses;

            mel_loss: MSE loss computed from mel_targets and mel_outputs;

            post_mel_loss: MSE loss computed from mel_targets and mel_outputs_postnet;

            stop_loss: BCE loss computed from stop_logits and stop_tokens.
        """
        mel_loss = paddle.nn.MSELoss()(mel_outputs, mel_targets)
        post_mel_loss = paddle.nn.MSELoss()(mel_outputs_postnet, mel_targets)
        stop_loss = paddle.nn.BCEWithLogitsLoss()(stop_logits, stop_tokens)
        total_loss = mel_loss + post_mel_loss + stop_loss
        losses = dict(
            loss=total_loss,
            mel_loss=mel_loss,
            post_mel_loss=post_mel_loss,
            stop_loss=stop_loss)
        return losses