diff --git a/examples/tacotron2_aishell3/tacotron2_msp.py b/examples/tacotron2_aishell3/tacotron2_msp.py
index 419654b..e64c4e3 100644
--- a/examples/tacotron2_aishell3/tacotron2_msp.py
+++ b/examples/tacotron2_aishell3/tacotron2_msp.py
@@ -495,7 +495,7 @@ class Tacotron2Decoder(nn.Layer):
             if int(paddle.argmax(alignment[0])) == T_enc - 1:
                 if (first_hit_end is None):
                     first_hit_end = i
-            if first_hit_end is not None and i > (first_hit_end + 6):
+            if first_hit_end is not None and i > (first_hit_end + 10):
                 print("content exhausted!")
                 break
             if len(mel_outputs) == max_decoder_steps:
diff --git a/examples/tacotron2_vctk/train.py b/examples/tacotron2_vctk/train.py
index aaaca48..f3e8cba 100644
--- a/examples/tacotron2_vctk/train.py
+++ b/examples/tacotron2_vctk/train.py
@@ -115,7 +115,7 @@ class TacotronVCTKExperiment(ExperimentBase):
         self.optimizer.step()
         iteration_time = time.time() - start
 
-        losses_np = {k: float(v) for k, v in losses.items()}
+        losses_np = {k: float(v.detach()) for k, v in losses.items()}
         # logging
         msg = "Rank: {}, ".format(dist.get_rank())
         msg += "step: {}, ".format(self.iteration)
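
Note on the first hunk: the decoder's early-stop heuristic records the first decoder step at which the attention peak reaches the last encoder position, then allows a fixed number of extra steps before breaking; the change widens that margin from 6 to 10 frames. Below is a minimal sketch of the heuristic in isolation; the function name run_decoder_loop, the alignments argument, and extra_frames are hypothetical stand-ins for the surrounding decoder state, not names from the repository.

import paddle

def run_decoder_loop(alignments, T_enc, extra_frames=10, max_decoder_steps=1000):
    """alignments: one [B, T_enc] attention-weight tensor per decoder step."""
    first_hit_end = None
    last_step = 0
    for i, alignment in enumerate(alignments):
        last_step = i
        # Record the first step whose attention peak lands on the last encoder frame.
        if int(paddle.argmax(alignment[0])) == T_enc - 1:
            if first_hit_end is None:
                first_hit_end = i
        # Allow a few extra frames after the text is "used up", then stop.
        if first_hit_end is not None and i > (first_hit_end + extra_frames):
            print("content exhausted!")
            break
        if i + 1 == max_decoder_steps:
            break
    return last_step

On the second hunk, float(v.detach()) converts each single-element loss tensor to a Python float for logging while making explicit that the value is taken outside the autograd graph; detach() and float() here are standard Paddle tensor operations.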