WIP: add some training info

This commit is contained in:
chenfeiyu 2021-06-14 17:21:45 +08:00
parent b0983e4d76
commit 95f64c4f02
2 changed files with 9 additions and 6 deletions

View File

@ -86,7 +86,7 @@ lambda_adv: 4.0 # Loss balancing coefficient.
batch_size: 6 # Batch size.
batch_max_steps: 25500 # Length of each audio in batch. Make sure dividable by hop_size.
pin_memory: true # Whether to pin memory in Pytorch DataLoader.
num_workers: 0 # Number of workers in Pytorch DataLoader.
num_workers: 4 # Number of workers in Pytorch DataLoader.
remove_short_samples: true # Whether to remove samples the length of which are less than batch_max_steps.
allow_cache: true # Whether to allow cache in dataset. If true, it requires cpu memory.

View File

@ -76,6 +76,8 @@ class Trainer(object):
else:
max_iteration = self.stop_trigger.period
p = tqdm.tqdm()
while True:
self.observation = {}
# set observation as the report target
@ -84,12 +86,13 @@ class Trainer(object):
# updating parameters and state
with scope(self.observation):
update()
print(self.observation)
p.update()
print(self.observation)
# execute extension when necessary
for name, entry in extensions:
if entry.trigger(self):
entry.extension(self)
# execute extension when necessary
for name, entry in extensions:
if entry.trigger(self):
entry.extension(self)
if stop_trigger(self):
print("Training Done!")