diff --git a/examples/fastspeech/synthesis.py b/examples/fastspeech/synthesis.py
index de726bd..c12742a 100644
--- a/examples/fastspeech/synthesis.py
+++ b/examples/fastspeech/synthesis.py
@@ -28,6 +28,8 @@ from parakeet.models.fastspeech.fastspeech import FastSpeech
 from parakeet.models.transformer_tts.utils import *
 from parakeet.models.wavenet import WaveNet, UpsampleNet
 from parakeet.models.clarinet import STFT, Clarinet, ParallelWaveNet
+from parakeet.modules import weight_norm
+from parakeet.models.waveflow import WaveFlowModule
 from parakeet.utils.layer_tools import freeze
 from parakeet.utils import io
 
@@ -35,7 +37,13 @@ from parakeet.utils import io
 def add_config_options_to_parser(parser):
     parser.add_argument("--config", type=str, help="path of the config file")
     parser.add_argument(
-        "--config_clarinet", type=str, help="path of the clarinet config file")
+        "--vocoder",
+        type=str,
+        default="griffinlim",
+        choices=['griffinlim', 'clarinet', 'waveflow'],
+        help="vocoder method")
+    parser.add_argument(
+        "--config_vocoder", type=str, help="path of the vocoder config file")
     parser.add_argument("--use_gpu", type=int, default=0, help="device to use")
     parser.add_argument(
         "--alpha",
@@ -47,9 +55,9 @@ def add_config_options_to_parser(parser):
     parser.add_argument(
         "--checkpoint", type=str, help="fastspeech checkpoint to synthesis")
     parser.add_argument(
-        "--checkpoint_clarinet",
+        "--checkpoint_vocoder",
         type=str,
-        help="clarinet checkpoint to synthesis")
+        help="vocoder checkpoint for synthesis")
     parser.add_argument(
         "--output",
@@ -83,46 +91,62 @@ def synthesis(text_input, args):
     pos_text = np.arange(1, text.shape[1] + 1)
     pos_text = np.expand_dims(pos_text, axis=0)
 
-    text = dg.to_variable(text)
-    pos_text = dg.to_variable(pos_text)
+    text = dg.to_variable(text).astype(np.int64)
+    pos_text = dg.to_variable(pos_text).astype(np.int64)
 
     _, mel_output_postnet = model(text, pos_text, alpha=args.alpha)
 
-    result = np.exp(mel_output_postnet.numpy())
-    mel_output_postnet = fluid.layers.transpose(
-        fluid.layers.squeeze(mel_output_postnet, [0]), [1, 0])
-    mel_output_postnet = np.exp(mel_output_postnet.numpy())
-    basis = librosa.filters.mel(cfg['audio']['sr'], cfg['audio']['n_fft'],
-                                cfg['audio']['num_mels'])
-    inv_basis = np.linalg.pinv(basis)
-    spec = np.maximum(1e-10, np.dot(inv_basis, mel_output_postnet))
+    if args.vocoder == 'griffinlim':
+        # synthesize with Griffin-Lim
+        wav = synthesis_with_griffinlim(
+            mel_output_postnet,
+            sr=cfg['audio']['sr'],
+            n_fft=cfg['audio']['n_fft'],
+            num_mels=cfg['audio']['num_mels'],
+            power=cfg['audio']['power'],
+            hop_length=cfg['audio']['hop_length'],
+            win_length=cfg['audio']['win_length'])
+    elif args.vocoder == 'clarinet':
+        # synthesize with ClariNet
+        wav = synthesis_with_clarinet(mel_output_postnet, args.config_vocoder,
+                                      args.checkpoint_vocoder, place)
+    elif args.vocoder == 'waveflow':
+        # synthesize with WaveFlow
+        wav = synthesis_with_waveflow(mel_output_postnet, args,
+                                      args.checkpoint_vocoder, place)
+    else:
+        print(
+            'vocoder error, we only support griffinlim, clarinet and waveflow, but received %s.'
+            % args.vocoder)
 
-    # synthesis use clarinet
-    wav_clarinet = synthesis_with_clarinet(
-        args.config_clarinet, args.checkpoint_clarinet, result, place)
-    writer.add_audio(text_input + '(clarinet)', wav_clarinet, 0,
+    writer.add_audio(text_input + '(' + args.vocoder + ')', wav, 0,
                      cfg['audio']['sr'])
 
     if not os.path.exists(os.path.join(args.output, 'samples')):
         os.mkdir(os.path.join(args.output, 'samples'))
-    write(
-        os.path.join(os.path.join(args.output, 'samples'), 'clarinet.wav'),
-        cfg['audio']['sr'], wav_clarinet)
-
-    #synthesis use griffin-lim
-    wav = librosa.core.griffinlim(
-        spec**cfg['audio']['power'],
-        hop_length=cfg['audio']['hop_length'],
-        win_length=cfg['audio']['win_length'])
-    writer.add_audio(text_input + '(griffin-lim)', wav, 0, cfg['audio']['sr'])
     write(
         os.path.join(
-            os.path.join(args.output, 'samples'), 'grinffin-lim.wav'),
+            os.path.join(args.output, 'samples'), args.vocoder + '.wav'),
         cfg['audio']['sr'], wav)
     print("Synthesis completed !!!")
     writer.close()
 
 
-def synthesis_with_clarinet(config_path, checkpoint, mel_spectrogram, place):
+def synthesis_with_griffinlim(mel_output, sr, n_fft, num_mels, power,
+                              hop_length, win_length):
+    mel_output = fluid.layers.transpose(
+        fluid.layers.squeeze(mel_output, [0]), [1, 0])
+    mel_output = np.exp(mel_output.numpy())
+    basis = librosa.filters.mel(sr, n_fft, num_mels)
+    inv_basis = np.linalg.pinv(basis)
+    spec = np.maximum(1e-10, np.dot(inv_basis, mel_output))
+
+    wav = librosa.core.griffinlim(
+        spec**power, hop_length=hop_length, win_length=win_length)
+
+    return wav
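[Note] `synthesis_with_griffinlim` packs the mel inversion that used to live inline in `synthesis()`. For reference, the same transform as a standalone sketch; the function name and the LJSpeech-style defaults are illustrative, and it assumes the model emits natural-log mel magnitudes, as the `np.exp` in the diff implies:

    import numpy as np
    import librosa

    def mel_to_wav_griffinlim(log_mel, sr=22050, n_fft=1024, num_mels=80,
                              power=1.2, hop_length=256, win_length=1024):
        mel = np.exp(log_mel)  # undo log compression; shape (num_mels, T)
        # Least-squares mel -> linear mapping via the pseudo-inverse of the
        # mel filter bank; the clamp keeps the power step numerically safe.
        basis = librosa.filters.mel(sr, n_fft, num_mels)
        spec = np.maximum(1e-10, np.dot(np.linalg.pinv(basis), mel))
        # Griffin-Lim iteratively recovers the phase the mel dropped.
        return librosa.core.griffinlim(
            spec**power, hop_length=hop_length, win_length=win_length)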
+
+
+def synthesis_with_clarinet(mel_output, config_path, checkpoint, place):
+    mel_spectrogram = np.exp(mel_output.numpy())
     with open(config_path, 'rt') as f:
         config = yaml.safe_load(f)
 
@@ -136,62 +160,86 @@ def synthesis_with_clarinet(config_path, checkpoint, mel_spectrogram, place):
 
     # only batch=1 for validation is enabled
 
-    with dg.guard(place):
-        # conditioner(upsampling net)
-        conditioner_config = config["conditioner"]
-        upsampling_factors = conditioner_config["upsampling_factors"]
-        upsample_net = UpsampleNet(upscale_factors=upsampling_factors)
-        freeze(upsample_net)
+    fluid.enable_dygraph(place)
+    # conditioner(upsampling net)
+    conditioner_config = config["conditioner"]
+    upsampling_factors = conditioner_config["upsampling_factors"]
+    upsample_net = UpsampleNet(upscale_factors=upsampling_factors)
+    freeze(upsample_net)
 
-        residual_channels = teacher_config["residual_channels"]
-        loss_type = teacher_config["loss_type"]
-        output_dim = teacher_config["output_dim"]
-        log_scale_min = teacher_config["log_scale_min"]
-        assert loss_type == "mog" and output_dim == 3, \
-            "the teacher wavenet should be a wavenet with single gaussian output"
+    residual_channels = teacher_config["residual_channels"]
+    loss_type = teacher_config["loss_type"]
+    output_dim = teacher_config["output_dim"]
+    log_scale_min = teacher_config["log_scale_min"]
+    assert loss_type == "mog" and output_dim == 3, \
+        "the teacher wavenet should be a wavenet with single gaussian output"
 
-        teacher = WaveNet(n_loop, n_layer, residual_channels, output_dim,
-                          n_mels, filter_size, loss_type, log_scale_min)
-        # load & freeze upsample_net & teacher
-        freeze(teacher)
+    teacher = WaveNet(n_loop, n_layer, residual_channels, output_dim, n_mels,
+                      filter_size, loss_type, log_scale_min)
+    # load & freeze upsample_net & teacher
+    freeze(teacher)
 
-        student_config = config["student"]
-        n_loops = student_config["n_loops"]
-        n_layers = student_config["n_layers"]
-        student_residual_channels = student_config["residual_channels"]
-        student_filter_size = student_config["filter_size"]
-        student_log_scale_min = student_config["log_scale_min"]
-        student = ParallelWaveNet(n_loops, n_layers, student_residual_channels,
-                                  n_mels, student_filter_size)
+    student_config = config["student"]
+    n_loops = student_config["n_loops"]
+    n_layers = student_config["n_layers"]
+    student_residual_channels = student_config["residual_channels"]
+    student_filter_size = student_config["filter_size"]
+    student_log_scale_min = student_config["log_scale_min"]
+    student = ParallelWaveNet(n_loops, n_layers, student_residual_channels,
+                              n_mels, student_filter_size)
 
-        stft_config = config["stft"]
-        stft = STFT(
-            n_fft=stft_config["n_fft"],
-            hop_length=stft_config["hop_length"],
-            win_length=stft_config["win_length"])
+    stft_config = config["stft"]
+    stft = STFT(
+        n_fft=stft_config["n_fft"],
+        hop_length=stft_config["hop_length"],
+        win_length=stft_config["win_length"])
 
-        lmd = config["loss"]["lmd"]
-        model = Clarinet(upsample_net, teacher, student, stft,
-                         student_log_scale_min, lmd)
-        io.load_parameters(model=model, checkpoint_path=checkpoint)
+    lmd = config["loss"]["lmd"]
+    model = Clarinet(upsample_net, teacher, student, stft,
+                     student_log_scale_min, lmd)
+    io.load_parameters(model=model, checkpoint_path=checkpoint)
 
-        if not os.path.exists(args.output):
-            os.makedirs(args.output)
-        model.eval()
+    if not os.path.exists(args.output):
+        os.makedirs(args.output)
+    model.eval()
 
-        # Rescale mel_spectrogram.
-        min_level, ref_level = 1e-5, 20  # hard code it
-        mel_spectrogram = 20 * np.log10(np.maximum(min_level, mel_spectrogram))
-        mel_spectrogram = mel_spectrogram - ref_level
-        mel_spectrogram = np.clip((mel_spectrogram + 100) / 100, 0, 1)
+    # Rescale mel_spectrogram.
+    min_level, ref_level = 1e-5, 20  # hard code it
+    mel_spectrogram = 20 * np.log10(np.maximum(min_level, mel_spectrogram))
+    mel_spectrogram = mel_spectrogram - ref_level
+    mel_spectrogram = np.clip((mel_spectrogram + 100) / 100, 0, 1)
 
-        mel_spectrogram = dg.to_variable(mel_spectrogram)
-        mel_spectrogram = fluid.layers.transpose(mel_spectrogram, [0, 2, 1])
+    mel_spectrogram = dg.to_variable(mel_spectrogram)
+    mel_spectrogram = fluid.layers.transpose(mel_spectrogram, [0, 2, 1])
 
-        wav_var = model.synthesis(mel_spectrogram)
-        wav_np = wav_var.numpy()[0]
+    wav_var = model.synthesis(mel_spectrogram)
+    wav_np = wav_var.numpy()[0]
 
-        return wav_np
+    return wav_np
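[Note] Before ClariNet synthesis, the exp-domain mel is rescaled into the normalized dB range the vocoder checkpoint was trained on. The same transform as a minimal numpy sketch (names hypothetical, constants copied from the diff):

    import numpy as np

    def normalize_mel(mel, min_level=1e-5, ref_level_db=20, min_db=-100):
        # amplitude -> dB relative to a 20 dB reference, floored at min_level
        mel_db = 20 * np.log10(np.maximum(min_level, mel)) - ref_level_db
        # squash [-100, 0] dB into [0, 1]; equals (mel_db + 100) / 100 above
        return np.clip((mel_db - min_db) / -min_db, 0, 1)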
student_config["residual_channels"] - student_filter_size = student_config["filter_size"] - student_log_scale_min = student_config["log_scale_min"] - student = ParallelWaveNet(n_loops, n_layers, student_residual_channels, - n_mels, student_filter_size) + student_config = config["student"] + n_loops = student_config["n_loops"] + n_layers = student_config["n_layers"] + student_residual_channels = student_config["residual_channels"] + student_filter_size = student_config["filter_size"] + student_log_scale_min = student_config["log_scale_min"] + student = ParallelWaveNet(n_loops, n_layers, student_residual_channels, + n_mels, student_filter_size) - stft_config = config["stft"] - stft = STFT( - n_fft=stft_config["n_fft"], - hop_length=stft_config["hop_length"], - win_length=stft_config["win_length"]) + stft_config = config["stft"] + stft = STFT( + n_fft=stft_config["n_fft"], + hop_length=stft_config["hop_length"], + win_length=stft_config["win_length"]) - lmd = config["loss"]["lmd"] - model = Clarinet(upsample_net, teacher, student, stft, - student_log_scale_min, lmd) - io.load_parameters(model=model, checkpoint_path=checkpoint) + lmd = config["loss"]["lmd"] + model = Clarinet(upsample_net, teacher, student, stft, + student_log_scale_min, lmd) + io.load_parameters(model=model, checkpoint_path=checkpoint) - if not os.path.exists(args.output): - os.makedirs(args.output) - model.eval() + if not os.path.exists(args.output): + os.makedirs(args.output) + model.eval() - # Rescale mel_spectrogram. - min_level, ref_level = 1e-5, 20 # hard code it - mel_spectrogram = 20 * np.log10(np.maximum(min_level, mel_spectrogram)) - mel_spectrogram = mel_spectrogram - ref_level - mel_spectrogram = np.clip((mel_spectrogram + 100) / 100, 0, 1) + # Rescale mel_spectrogram. + min_level, ref_level = 1e-5, 20 # hard code it + mel_spectrogram = 20 * np.log10(np.maximum(min_level, mel_spectrogram)) + mel_spectrogram = mel_spectrogram - ref_level + mel_spectrogram = np.clip((mel_spectrogram + 100) / 100, 0, 1) - mel_spectrogram = dg.to_variable(mel_spectrogram) - mel_spectrogram = fluid.layers.transpose(mel_spectrogram, [0, 2, 1]) + mel_spectrogram = dg.to_variable(mel_spectrogram) + mel_spectrogram = fluid.layers.transpose(mel_spectrogram, [0, 2, 1]) - wav_var = model.synthesis(mel_spectrogram) - wav_np = wav_var.numpy()[0] + wav_var = model.synthesis(mel_spectrogram) + wav_np = wav_var.numpy()[0] - return wav_np + return wav_np + + +def synthesis_with_waveflow(mel_output, args, checkpoint, place): + #mel_output = np.exp(mel_output.numpy()) + mel_output = mel_output.numpy() + + fluid.enable_dygraph(place) + args.config = args.config_vocoder + args.use_fp16 = False + config = io.add_yaml_config_to_args(args) + + mel_spectrogram = dg.to_variable(mel_output) + mel_spectrogram = fluid.layers.transpose(mel_spectrogram, [0, 2, 1]) + + # Build model. + waveflow = WaveFlowModule(config) + io.load_parameters(model=waveflow, checkpoint_path=checkpoint) + for layer in waveflow.sublayers(): + if isinstance(layer, weight_norm.WeightNormWrapper): + layer.remove_weight_norm() + + # Run model inference. 
diff --git a/examples/fastspeech/synthesis.sh b/examples/fastspeech/synthesis.sh
index a6a0347..79a62d0 100644
--- a/examples/fastspeech/synthesis.sh
+++ b/examples/fastspeech/synthesis.sh
@@ -1,13 +1,20 @@
 # train model
+CUDA_VISIBLE_DEVICES=0 \
 python -u synthesis.py \
 --use_gpu=1 \
 --alpha=1.0 \
---checkpoint='./checkpoint/fastspeech/step-120000' \
+--checkpoint='./checkpoint/fastspeech1024/step-160000' \
 --config='configs/ljspeech.yaml' \
---config_clarine='../clarinet/configs/config.yaml' \
---checkpoint_clarinet='../clarinet/checkpoint/step-500000' \
 --output='./synthesis' \
+--vocoder='waveflow' \
+--config_vocoder='../waveflow/checkpoint/waveflow_res64_ljspeech_ckpt_1.0/waveflow_ljspeech.yaml' \
+--checkpoint_vocoder='../waveflow/checkpoint/waveflow_res64_ljspeech_ckpt_1.0/step-3020000'
+#--vocoder='clarinet' \
+#--config_vocoder='../clarinet/configs/clarinet_ljspeech.yaml' \
+#--checkpoint_vocoder='../clarinet/checkpoint/step-500000' \
+
+
 if [ $? -ne 0 ]; then
     echo "Failed in synthesis!"
diff --git a/examples/transformer_tts/synthesis.py b/examples/transformer_tts/synthesis.py
index 7d7f965..d4e17bf 100644
--- a/examples/transformer_tts/synthesis.py
+++ b/examples/transformer_tts/synthesis.py
@@ -28,6 +28,10 @@ from parakeet.models.transformer_tts.utils import *
 from parakeet import audio
 from parakeet.models.transformer_tts import Vocoder
 from parakeet.models.transformer_tts import TransformerTTS
+from parakeet.modules import weight_norm
+from parakeet.models.waveflow import WaveFlowModule
+from parakeet.modules.weight_norm import WeightNormWrapper
+from parakeet.models.wavenet import UpsampleNet, WaveNet, ConditionalWavenet
 from parakeet.utils import io
 
@@ -44,6 +48,14 @@ def add_config_options_to_parser(parser):
         "--checkpoint_transformer",
         type=str,
         help="transformer_tts checkpoint to synthesis")
+    parser.add_argument(
+        "--vocoder",
+        type=str,
+        default="griffinlim",
+        choices=['griffinlim', 'wavenet', 'waveflow'],
+        help="vocoder method")
+    parser.add_argument(
+        "--config_vocoder", type=str, help="path of the vocoder config file")
     parser.add_argument(
         "--checkpoint_vocoder",
         type=str,
@@ -82,31 +94,32 @@ def synthesis(text_input, args):
         model=model, checkpoint_path=args.checkpoint_transformer)
     model.eval()
 
-    with fluid.unique_name.guard():
-        model_vocoder = Vocoder(
-            cfg['train']['batch_size'], cfg['vocoder']['hidden_size'],
-            cfg['audio']['num_mels'], cfg['audio']['n_fft'])
-        # Load parameters.
-        global_step = io.load_parameters(
-            model=model_vocoder, checkpoint_path=args.checkpoint_vocoder)
-        model_vocoder.eval()
     # init input
     text = np.asarray(text_to_sequence(text_input))
-    text = fluid.layers.unsqueeze(dg.to_variable(text), [0])
+    text = fluid.layers.unsqueeze(dg.to_variable(text).astype(np.int64), [0])
     mel_input = dg.to_variable(np.zeros([1, 1, 80])).astype(np.float32)
     pos_text = np.arange(1, text.shape[1] + 1)
-    pos_text = fluid.layers.unsqueeze(dg.to_variable(pos_text), [0])
+    pos_text = fluid.layers.unsqueeze(
+        dg.to_variable(pos_text).astype(np.int64), [0])
 
     pbar = tqdm(range(args.max_len))
     for i in pbar:
         pos_mel = np.arange(1, mel_input.shape[1] + 1)
-        pos_mel = fluid.layers.unsqueeze(dg.to_variable(pos_mel), [0])
+        pos_mel = fluid.layers.unsqueeze(
+            dg.to_variable(pos_mel).astype(np.int64), [0])
         mel_pred, postnet_pred, attn_probs, stop_preds, attn_enc, attn_dec = model(
             text, mel_input, pos_text, pos_mel)
         mel_input = fluid.layers.concat(
             [mel_input, postnet_pred[:, -1:, :]], axis=1)
-
-    mag_pred = model_vocoder(postnet_pred)
+    global_step = 0
+    for i, prob in enumerate(attn_probs):
+        for j in range(4):
+            x = np.uint8(cm.viridis(prob.numpy()[j]) * 255)
+            writer.add_image(
+                'Attention_%d_0' % global_step,
+                x,
+                i * 4 + j,
+                dataformats="HWC")
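[Note] The loop added above dumps decoder alignments to the summary writer. In isolation the conversion is: an attention matrix with values in [0, 1] goes through a matplotlib colormap to become an RGBA uint8 image (a sketch; the hard-coded 4 in the diff is the number of heads visualized):

    import numpy as np
    from matplotlib import cm

    def attention_to_image(prob):
        # cm.viridis maps [0, 1] floats to RGBA floats in [0, 1]; scale to
        # uint8 for writer.add_image(..., dataformats="HWC").
        return np.uint8(cm.viridis(prob) * 255)  # shape (T_dec, T_enc, 4)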
 
     _ljspeech_processor = audio.AudioProcessor(
         sample_rate=cfg['audio']['sr'],
@@ -122,45 +135,130 @@ def synthesis(text_input, args):
         symmetric_norm=False,
         max_norm=1.,
         mel_fmin=0,
-        mel_fmax=None,
+        mel_fmax=8000,
         clip_norm=True,
         griffin_lim_iters=60,
         do_trim_silence=False,
         sound_norm=False)
 
+    if args.vocoder == 'griffinlim':
+        # synthesize with Griffin-Lim
+        wav = synthesis_with_griffinlim(postnet_pred, _ljspeech_processor)
+    elif args.vocoder == 'wavenet':
+        # synthesize with WaveNet
+        wav = synthesis_with_wavenet(postnet_pred, args)
+    elif args.vocoder == 'waveflow':
+        # synthesize with WaveFlow
+        wav = synthesis_with_waveflow(postnet_pred, args,
+                                      args.checkpoint_vocoder,
+                                      _ljspeech_processor, place)
+    else:
+        print(
+            'vocoder error, we only support griffinlim, wavenet and waveflow, but received %s.'
+            % args.vocoder)
+
+    writer.add_audio(text_input + '(' + args.vocoder + ')', wav, 0,
+                     cfg['audio']['sr'])
+    if not os.path.exists(os.path.join(args.output, 'samples')):
+        os.mkdir(os.path.join(args.output, 'samples'))
+    write(
+        os.path.join(
+            os.path.join(args.output, 'samples'), args.vocoder + '.wav'),
+        cfg['audio']['sr'], wav)
+    print("Synthesis completed !!!")
+    writer.close()
+
+
+def synthesis_with_griffinlim(mel_output, _ljspeech_processor):
+    # synthesize with Griffin-Lim
+    mel_output = fluid.layers.transpose(
+        fluid.layers.squeeze(mel_output, [0]), [1, 0])
+    mel_output = np.exp(mel_output.numpy())
+    basis = librosa.filters.mel(22050, 1024, 80, fmin=0, fmax=8000)
+    inv_basis = np.linalg.pinv(basis)
+    spec = np.maximum(1e-10, np.dot(inv_basis, mel_output))
+
+    wav = librosa.core.griffinlim(spec**1.2, hop_length=256, win_length=1024)
+
+    return wav
+
+
+def synthesis_with_wavenet(mel_output, args):
+    with open(args.config_vocoder, 'rt') as f:
+        config = yaml.safe_load(f)
+    n_mels = config["data"]["n_mels"]
+    model_config = config["model"]
+    filter_size = model_config["filter_size"]
+    upsampling_factors = model_config["upsampling_factors"]
+    encoder = UpsampleNet(upsampling_factors)
+
+    n_loop = model_config["n_loop"]
+    n_layer = model_config["n_layer"]
+    residual_channels = model_config["residual_channels"]
+    output_dim = model_config["output_dim"]
+    loss_type = model_config["loss_type"]
+    log_scale_min = model_config["log_scale_min"]
+    decoder = WaveNet(n_loop, n_layer, residual_channels, output_dim, n_mels,
+                      filter_size, loss_type, log_scale_min)
+
+    model = ConditionalWavenet(encoder, decoder)
+
+    # load model parameters
+    iteration = io.load_parameters(
+        model, checkpoint_path=args.checkpoint_vocoder)
+
+    for layer in model.sublayers():
+        if isinstance(layer, WeightNormWrapper):
+            layer.remove_weight_norm()
+    mel_output = fluid.layers.transpose(mel_output, [0, 2, 1])
+    wav = model.synthesis(mel_output)
+    return wav.numpy()[0]
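[Note] `synthesis_with_wavenet` rebuilds the vocoder entirely from its YAML config. The keys it reads, sketched as a Python dict (values are illustrative placeholders, not the shipped LJSpeech settings):

    config = {
        "data": {"n_mels": 80},
        "model": {
            "upsampling_factors": [16, 16],  # conditioner upsampling per hop
            "filter_size": 2,
            "n_loop": 10,
            "n_layer": 3,
            "residual_channels": 128,
            "output_dim": 3,   # with loss_type "mog", a single-Gaussian head
            "loss_type": "mog",
            "log_scale_min": -9.0,
        },
    }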
+
+
+def synthesis_with_cbhg(mel_output, _ljspeech_processor, cfg):
+    with fluid.unique_name.guard():
+        model_vocoder = Vocoder(
+            cfg['train']['batch_size'], cfg['vocoder']['hidden_size'],
+            cfg['audio']['num_mels'], cfg['audio']['n_fft'])
+        # Load parameters.
+        global_step = io.load_parameters(
+            model=model_vocoder, checkpoint_path=args.checkpoint_vocoder)
+        model_vocoder.eval()
+        mag_pred = model_vocoder(mel_output)
     # synthesis with cbhg
     wav = _ljspeech_processor.inv_spectrogram(
         fluid.layers.transpose(fluid.layers.squeeze(mag_pred, [0]), [1, 0])
         .numpy())
-    global_step = 0
-    for i, prob in enumerate(attn_probs):
-        for j in range(4):
-            x = np.uint8(cm.viridis(prob.numpy()[j]) * 255)
-            writer.add_image(
-                'Attention_%d_0' % global_step,
-                x,
-                i * 4 + j,
-                dataformats="HWC")
+    return wav
 
-    writer.add_audio(text_input + '(cbhg)', wav, 0, cfg['audio']['sr'])
-    if not os.path.exists(os.path.join(args.output, 'samples')):
-        os.mkdir(os.path.join(args.output, 'samples'))
-    write(
-        os.path.join(os.path.join(args.output, 'samples'), 'cbhg.wav'),
-        cfg['audio']['sr'], wav)
 
+def synthesis_with_waveflow(mel_output, args, checkpoint, _ljspeech_processor,
+                            place):
+    mel_output = fluid.layers.transpose(
+        fluid.layers.squeeze(mel_output, [0]), [1, 0])
+    mel_output = mel_output.numpy()
+    #mel_output = (mel_output - mel_output.min())/(mel_output.max() - mel_output.min())
+    #mel_output = 5 * mel_output - 4
+    #mel_output = np.log(10) * mel_output
 
-    # synthesis with griffin-lim
-    wav = _ljspeech_processor.inv_melspectrogram(
-        fluid.layers.transpose(
-            fluid.layers.squeeze(postnet_pred, [0]), [1, 0]).numpy())
-    writer.add_audio(text_input + '(griffin)', wav, 0, cfg['audio']['sr'])
+    fluid.enable_dygraph(place)
+    args.config = args.config_vocoder
+    args.use_fp16 = False
+    config = io.add_yaml_config_to_args(args)
 
-    write(
-        os.path.join(os.path.join(args.output, 'samples'), 'griffin.wav'),
-        cfg['audio']['sr'], wav)
-    print("Synthesis completed !!!")
-    writer.close()
+    mel_spectrogram = dg.to_variable(mel_output)
+    mel_spectrogram = fluid.layers.unsqueeze(mel_spectrogram, [0])
+
+    # Build model.
+    waveflow = WaveFlowModule(config)
+    io.load_parameters(model=waveflow, checkpoint_path=checkpoint)
+    for layer in waveflow.sublayers():
+        if isinstance(layer, weight_norm.WeightNormWrapper):
+            layer.remove_weight_norm()
+
+    # Run model inference.
+    wav = waveflow.synthesize(mel_spectrogram, sigma=config.sigma)
+    return wav.numpy()[0]
 
 
 if __name__ == '__main__':
@@ -169,5 +267,6 @@ if __name__ == '__main__':
     args = parser.parse_args()
     # Print the whole config setting.
     pprint(vars(args))
-    synthesis("Parakeet stands for Paddle PARAllel text-to-speech toolkit.",
-              args)
+    synthesis(
+        "Life was like a box of chocolates, you never know what you're gonna get.",
+        args)
diff --git a/examples/transformer_tts/synthesis.sh b/examples/transformer_tts/synthesis.sh
index 39312f8..a282d70 100644
--- a/examples/transformer_tts/synthesis.sh
+++ b/examples/transformer_tts/synthesis.sh
@@ -2,12 +2,20 @@
 # train model
 CUDA_VISIBLE_DEVICES=0 \
 python -u synthesis.py \
---max_len=300 \
---use_gpu=1 \
+--max_len=400 \
+--use_gpu=0 \
 --output='./synthesis' \
 --config='configs/ljspeech.yaml' \
 --checkpoint_transformer='./checkpoint/transformer/step-120000' \
---checkpoint_vocoder='./checkpoint/vocoder/step-100000' \
+--vocoder='wavenet' \
+--config_vocoder='../wavenet/config.yaml' \
+--checkpoint_vocoder='../wavenet/step-2450000'
+#--vocoder='waveflow' \
+#--config_vocoder='../waveflow/checkpoint/waveflow_res64_ljspeech_ckpt_1.0/waveflow_ljspeech.yaml' \
+#--checkpoint_vocoder='../waveflow/checkpoint/waveflow_res64_ljspeech_ckpt_1.0/step-3020000' \
+#--vocoder='cbhg' \
+#--config_vocoder='configs/ljspeech.yaml' \
+#--checkpoint_vocoder='checkpoint/cbhg/step-100000' \
 
 if [ $? -ne 0 ]; then
     echo "Failed in training!"
diff --git a/examples/transformer_tts/train_vocoder.py b/examples/transformer_tts/train_vocoder.py
index 30f31aa..37e9398 100644
--- a/examples/transformer_tts/train_vocoder.py
+++ b/examples/transformer_tts/train_vocoder.py
@@ -98,7 +98,7 @@ def main(args):
             local_rank,
             is_vocoder=True).reader()
 
-    for epoch in range(cfg['train']['max_epochs']):
+    for epoch in range(cfg['train']['max_iteration']):
         pbar = tqdm(reader)
         for i, data in enumerate(pbar):
             pbar.set_description('Processing at epoch %d' % epoch)
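[Note] The last hunk renames the config key but keeps the epoch-shaped loop, so `max_iteration` is still consumed as an epoch count. If a true iteration budget is intended, the usual shape looks like the sketch below (`reader`, `train_step`, and `cfg` stand in for the script's own objects):

    global_step = 0
    max_iteration = cfg['train']['max_iteration']
    while global_step < max_iteration:
        for data in reader():
            train_step(data)  # one forward/backward/update
            global_step += 1
            if global_step >= max_iteration:
                break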