Merge branch 'reborn' of https://github.com/iclementine/Parakeet into reborn

commit e29502f634
@@ -12,6 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

__version__ = "0.0.0"
__version__ = "0.2.0"

from parakeet import data, frontend, models, modules
from parakeet import audio, data, datastes, frontend, models, modules, training, utils
@@ -16,6 +16,8 @@ import librosa
import soundfile as sf
import numpy as np

__all__ = ["AudioProcessor"]

class AudioProcessor(object):
    def __init__(self,
                 sample_rate:int,
@@ -26,7 +28,7 @@ class AudioProcessor(object):
                 f_min:int=0,
                 f_max:int=None,
                 window="hann",
                 center="True",
                 center=True,
                 pad_mode="reflect"):
        # read & write
        self.sample_rate = sample_rate
@@ -13,6 +13,9 @@ https://github.com/mozilla/TTS/issues/377
"""
import numpy as np

__all__ = ["NormalizerBase", "LogMagnitude", "UnitMagnitude"]


class NormalizerBase(object):
    def transform(self, spec):
        raise NotImplementedError("transform must be implemented")
@@ -13,5 +13,4 @@
# limitations under the License.

from .dataset import *
from .sampler import *
from .batch import *
@@ -17,6 +17,10 @@ Batch functions for text sequences, audio and spectrograms are provided.
"""
import numpy as np

__all__ = [
    "batch_text_id", "batch_wav", "batch_spec",
    "TextIDBatcher", "WavBatcher", "SpecBatcher",
]

class TextIDBatcher(object):
    """A wrapper class for `batch_text_id`."""
@@ -16,6 +16,11 @@ import six
import paddle
from paddle.io import Dataset

__all__ = [
    "split", "TransformDataset", "CacheDataset", "TupleDataset",
    "DictDataset", "SliceDataset", "SubsetDataset", "FilterDataset",
    "ChainDataset",
]

def split(dataset, first_size):
    """A utility function to split a dataset into two datasets."""
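For reference, a minimal usage sketch of the `split` helper declared above. It assumes `split` returns the first `first_size` examples and the remaining examples as two datasets, and that the module path is `parakeet.data.dataset`; neither detail is shown in this hunk.

    # Hypothetical usage; the return order and import path are assumptions.
    from parakeet.data.dataset import split

    # dataset is any random-access dataset with __getitem__ and __len__,
    # e.g. a paddle.io.Dataset subclass.
    train_set, valid_set = split(dataset, first_size=len(dataset) - 64)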
@@ -1,200 +0,0 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
At most cases, we have non-stream dataset, which means we can random access it with __getitem__, and we can get the length of the dataset with __len__.

This suffices for a sampler. We implemente sampler as iterable of valid indices. By valid, we mean 0 <= index < N, where N is the length of the dataset. We then collect several indices within a batch and use them to collect examples from the dataset with __getitem__. Then transform these examples into a batch.

So the sampler is only responsible for generating valid indices.
"""

import numpy as np
import random
import paddle
from paddle.io import Sampler


class PartialyRandomizedSimilarTimeLengthSampler(Sampler):
    """Partially randmoized sampler, implemented as a example sampler
    1. Sort by lengths
    2. Pick a small patch and randomize it
    3. Permutate mini-batchs
    """

    def __init__(self,
                 lengths,
                 batch_size=4,
                 batch_group_size=None,
                 permutate=True):
        """[summary]

        Args:
            lengths (List[int]): The length of the examples of the dataset. This is the key to be considered as 'time length'.
            batch_size (int, optional): batch size. Defaults to 4.
            batch_group_size (int, optional): the size of a small batch. Random shuffling is applied within such patches. If `batch_group_size` is not provided, it is set to min(batch_size * 32, len(self.lengths)). Batch_group_size should be perfectly divided by batch_size. Defaults to None.
            permutate (bool, optional): permutate batches. Defaults to True.
        """
        _lengths = np.array(
            lengths,
            dtype=np.int64)  # maybe better implement length as a sort key
        self.lengths = np.sort(_lengths)
        self.sorted_indices = np.argsort(_lengths)

        self.batch_size = batch_size
        if batch_group_size is None:
            batch_group_size = min(batch_size * 32, len(self.lengths))
            if batch_group_size % batch_size != 0:
                batch_group_size -= batch_group_size % batch_size

        self.batch_group_size = batch_group_size
        assert batch_group_size % batch_size == 0
        self.permutate = permutate

    def __iter__(self):
        indices = np.copy(self.sorted_indices)
        batch_group_size = self.batch_group_size
        s, e = 0, 0
        for i in range(len(indices) // batch_group_size):
            s = i * batch_group_size
            e = s + batch_group_size
            random.shuffle(indices[s:e])  # inplace

        # Permutate batches
        if self.permutate:
            perm = np.arange(len(indices[:e]) // self.batch_size)
            random.shuffle(perm)
            indices[:e] = indices[:e].reshape(
                -1, self.batch_size)[perm, :].reshape(-1)

        # Handle last elements
        s += batch_group_size
        #print(indices)
        if s < len(indices):
            random.shuffle(indices[s:])

        return iter(indices)

    def __len__(self):
        return len(self.sorted_indices)


class BucketSampler(Sampler):
    def __init__(self,
                 lengths,
                 batch_size=4,
                 batch_group_size=None,
                 permutate=True,
                 num_trainers=1,
                 rank=0):
        # maybe better implement length as a sort key
        _lengths = np.array(lengths, dtype=np.int64)
        self.lengths = np.sort(_lengths)
        self.sorted_indices = np.argsort(_lengths)
        self.num_trainers = num_trainers
        self.rank = rank

        self.dataset_size = len(_lengths)
        self.num_samples = int(np.ceil(self.dataset_size / num_trainers))
        self.total_size = self.num_samples * num_trainers
        assert self.total_size >= self.dataset_size

        self.batch_size = batch_size
        total_batch_size = num_trainers * batch_size
        self.total_batch_size = total_batch_size

        if batch_group_size is None:
            batch_group_size = min(total_batch_size * 32, len(self.lengths))
            if batch_group_size % total_batch_size != 0:
                batch_group_size -= batch_group_size % total_batch_size

        self.batch_group_size = batch_group_size
        assert batch_group_size % total_batch_size == 0
        self.permutate = permutate

    def __iter__(self):
        indices = self.sorted_indices

        # Append extra samples to make it evenly distributed on all trainers.
        num_extras = self.total_size - self.dataset_size
        extra_indices = np.random.choice(
            indices, size=(num_extras, ), replace=False)
        indices = np.concatenate((indices, extra_indices))
        assert len(indices) == self.total_size

        batch_group_size = self.batch_group_size
        s, e = 0, 0
        for i in range(len(indices) // batch_group_size):
            s = i * batch_group_size
            e = s + batch_group_size
            random.shuffle(indices[s:e])  # inplace

        # Permutate batches
        total_batch_size = self.total_batch_size
        if self.permutate:
            perm = np.arange(len(indices[:e]) // total_batch_size)
            random.shuffle(perm)
            indices[:e] = indices[:e].reshape(
                -1, total_batch_size)[perm, :].reshape(-1)

        # Handle last elements
        s += batch_group_size
        #print(indices)
        if s < len(indices):
            random.shuffle(indices[s:])

        # Subset samples for each trainer.
        indices = indices[self.rank:self.total_size:self.num_trainers]
        assert len(indices) == self.num_samples
        return iter(indices)

    def __len__(self):
        return len(self.sorted_indices)


class WeightedRandomSampler(Sampler):
    """Samples elements from ``[0,..,len(weights)-1]`` with given probabilities (weights).
    Args:
        weights (List[float]): a sequence of weights, not necessary summing up to 1.
        num_samples (int): number of samples to draw.
        replacement (bool): whether samples are drawn with replacement. When replacement is False, num_samples should not be larger than len(weights).
    Example:
        >>> list(WeightedRandomSampler([0.1, 0.9, 0.4, 0.7, 3.0, 0.6], 5, replacement=True))
        [0, 0, 0, 1, 0]
        >>> list(WeightedRandomSampler([0.9, 0.4, 0.05, 0.2, 0.3, 0.1], 5, replacement=False))
        [0, 1, 4, 3, 2]
    """

    def __init__(self, weights, num_samples, replacement):
        if not isinstance(num_samples, int) or num_samples <= 0:
            raise ValueError("num_samples should be a positive integer "
                             "value, but got num_samples={}".format(
                                 num_samples))
        self.weights = np.array(weights, dtype=np.float64)
        self.num_samples = num_samples
        self.replacement = replacement
        if replacement is False and num_samples > len(weights):
            raise ValueError(
                "when replacement is False, num_samples should not be"
                "larger that length of weight.")

    def __iter__(self):
        return iter(
            np.random.choice(
                len(self.weights),
                size=(self.num_samples, ),
                replace=self.replacement,
                p=self.weights).tolist())

    def __len__(self):
        return self.num_samples
@@ -2,6 +2,8 @@ from paddle.io import Dataset
import os
import librosa

__all__ = ["AudioFolderDataset"]

class AudioFolderDataset(Dataset):
    def __init__(self, path, sample_rate, extension="wav"):
        self.root = os.path.expanduser(path)
@@ -1,6 +1,8 @@
from paddle.io import Dataset
from pathlib import Path

__all__ = ["LJSpeechMetaData"]

class LJSpeechMetaData(Dataset):
    def __init__(self, root):
        self.root = Path(root).expanduser()
@@ -6,6 +6,9 @@ from parakeet.frontend import Vocab
from opencc import OpenCC
from parakeet.frontend.punctuation import get_punctuations

__all__ = ["Phonetics", "English", "Chinese"]


class Phonetics(ABC):
    @abstractmethod
    def __call__(self, sentence):
@@ -2,6 +2,10 @@ from typing import Dict, Iterable, List
from ruamel import yaml
from collections import OrderedDict


__all__ = ["Vocab"]


class Vocab(object):
    def __init__(self, symbols: Iterable[str],
                 padding_symbol="<pad>",
@@ -32,11 +32,11 @@ def quantize(values, n_bands):
    """Linearly quantize a float Tensor in [-1, 1) to an integer Tensor in [0, n_bands).

    Args:
        values (Variable): dtype: float32 or float64. the floating point value.
        values (Tensor): dtype: float32 or float64. the floating point value.
        n_bands (int): the number of bands. The output integer Tensor's value is in the range [0, n_bands).

    Returns:
        Variable: the quantized tensor, dtype: int64.
        Tensor: the quantized tensor, dtype: int64.
    """
    quantized = paddle.cast((values + 1.0) / 2.0 * n_bands, "int64")
    return quantized
@@ -46,11 +46,11 @@ def dequantize(quantized, n_bands, dtype=None):
    """Linearly dequantize an integer Tensor into a float Tensor in the range [-1, 1).

    Args:
        quantized (Variable): dtype: int64. The quantized value in the range [0, n_bands).
        quantized (Tensor): dtype: int64. The quantized value in the range [0, n_bands).
        n_bands (int): number of bands. The input integer Tensor's value is in the range [0, n_bands).

    Returns:
        Variable: the dequantized tensor, dtype is specified by dtype.
        Tensor: the dequantized tensor, dtype is specified by dtype.
    """
    dtype = dtype or paddle.get_default_dtype()
    value = (paddle.cast(quantized, dtype) + 0.5) * (2.0 / n_bands) - 1.0
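A small round-trip sketch for the `quantize`/`dequantize` pair documented above. The import path is an assumption (the file name is not visible in this diff); the error bound follows from the bin width 2/n_bands.

    import paddle
    # Assumed import path for the two helpers shown above.
    from parakeet.modules.audio import quantize, dequantize

    x = paddle.to_tensor([-1.0, -0.5, 0.0, 0.5, 0.999])
    q = quantize(x, n_bands=256)        # int64 values in [0, 256)
    x_hat = dequantize(q, n_bands=256)  # floats back in [-1, 1)
    print(float(paddle.abs(x - x_hat).max()))  # at most 1/256, half the bin width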
@@ -61,12 +61,12 @@ def crop(x, audio_start, audio_length):
    """Crop the upsampled condition to match audio_length. The upsampled condition has the same time steps as the whole audio does. But since audios are sliced to 0.5 seconds randomly while conditions are not, upsampled conditions should also be sliced to exactly match the time steps of the audio slice.

    Args:
        x (Variable): shape(B, C, T), dtype float32, the upsample condition.
        audio_start (Variable): shape(B, ), dtype: int64, the index of the starting point.
        x (Tensor): shape(B, C, T), dtype float32, the upsample condition.
        audio_start (Tensor): shape(B,), dtype: int64, the index of the starting point.
        audio_length (int): the length of the audio (number of samples it contains).

    Returns:
        Variable: shape(B, C, audio_length), cropped condition.
        Tensor: shape(B, C, audio_length), cropped condition.
    """
    # crop audio
    slices = []  # for each example
@@ -0,0 +1,12 @@
from yacs.config import CfgNode

_C = CfgNode(
    dict(
        valid_interval=1000,  # validation
        save_interval=10000,  # checkpoint
        max_iteration=900000,  # max iteration to train
    )
)

def get_default_training_config():
    return _C.clone()
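A hedged usage sketch for the new default training config. ExperimentBase below reads these keys as `config.training.*`, so the default node is assumed to be mounted under a `training` key of a larger CfgNode; that wiring and the import path are not part of this diff.

    from yacs.config import CfgNode
    # Import path is an assumption.
    from parakeet.training.config import get_default_training_config

    config = CfgNode(dict(training=get_default_training_config()))
    # config.merge_from_file("config.yaml")  # optional overrides
    config.freeze()
    print(config.training.max_iteration)  # 900000 by default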
@@ -0,0 +1,180 @@
import time
import logging
from pathlib import Path
import numpy as np
import paddle
from paddle import distributed as dist
from paddle.io import DataLoader, DistributedBatchSampler
from tensorboardX import SummaryWriter
from collections import defaultdict

import parakeet
from parakeet.utils import checkpoint, mp_tools

class ExperimentBase(object):
    """
    An experiment template in order to structure the training code and take care of saving, loading, logging, and visualization stuff. It's intended to be flexible and simple.

    So it only handles the output directory (create a directory for the output, create a checkpoint directory, dump the config in use and create visualizer and logger) in a standard way, without restricting the input/output protocols of the model and dataloader. It leaves the main part for the user to implement on their own (set up the model, criterion, optimizer, define a training step, define a validation function and customize all the text and visual logs).

    It does not save much boilerplate code. The users still have to write the forward/backward/update manually, but they are free to add non-standard behaviors if needed.

    We have some conventions to follow.
    1. Experiment should have `.model`, `.optimizer`, `.train_loader` and `.valid_loader`, `.config`, `.args` attributes.
    2. The config should have a `.training` field, which has `valid_interval`, `save_interval` and `max_iteration` keys. It is used as the trigger to invoke validation, checkpointing and stopping of the experiment.
    3. There are four methods, namely `train_batch`, `valid`, `setup_model` and `setup_dataloader`, that should be implemented.

    Feel free to add/overwrite other methods and standalone functions if you need.

    Examples:
    --------
    def main_sp(config, args):
        exp = Experiment(config, args)
        exp.setup()
        exp.run()

    def main(config, args):
        if args.nprocs > 1 and args.device == "gpu":
            dist.spawn(main_sp, args=(config, args), nprocs=args.nprocs)
        else:
            main_sp(config, args)

    if __name__ == "__main__":
        config = get_cfg_defaults()
        parser = default_argument_parser()
        args = parser.parse_args()
        if args.config:
            config.merge_from_file(args.config)
        if args.opts:
            config.merge_from_list(args.opts)
        config.freeze()
        print(config)
        print(args)

        main(config, args)

    """
    def __init__(self, config, args):
        self.config = config
        self.args = args

    def setup(self):
        if self.parallel:
            self.init_parallel()

        self.setup_output_dir()
        self.dump_config()
        self.setup_visualizer()
        self.setup_logger()
        self.setup_checkpointer()

        self.setup_dataloader()
        self.setup_model()

        self.iteration = 0
        self.epoch = 0

    @property
    def parallel(self):
        return self.args.device == "gpu" and self.args.nprocs > 1

    def init_parallel(self):
        dist.init_parallel_env()

    def save(self):
        checkpoint.save_parameters(
            self.checkpoint_dir, self.iteration, self.model, self.optimizer)

    def resume_or_load(self):
        iteration = checkpoint.load_parameters(
            self.model,
            self.optimizer,
            checkpoint_dir=self.checkpoint_dir,
            checkpoint_path=self.args.checkpoint_path)
        self.iteration = iteration

    def read_batch(self):
        try:
            batch = next(self.iterator)
        except StopIteration:
            self.new_epoch()
            batch = next(self.iterator)
        return batch

    def new_epoch(self):
        self.epoch += 1
        if self.parallel:
            self.train_loader.batch_sampler.set_epoch(self.epoch)
        self.iterator = iter(self.train_loader)

    def train(self):
        self.new_epoch()
        while self.iteration <= self.config.training.max_iteration:
            self.iteration += 1
            self.train_batch()

            if self.iteration % self.config.training.valid_interval == 0:
                self.valid()

            if self.iteration % self.config.training.save_interval == 0:
                self.save()

    def run(self):
        self.resume_or_load()
        try:
            self.train()
        except KeyboardInterrupt:
            self.save()
            exit(-1)

    @mp_tools.rank_zero_only
    def setup_output_dir(self):
        # output dir
        output_dir = Path(self.args.output).expanduser()
        output_dir.mkdir(exist_ok=True)

        self.output_dir = output_dir

    @mp_tools.rank_zero_only
    def setup_checkpointer(self):
        # checkpoint dir
        checkpoint_dir = self.output_dir / "checkpoints"
        checkpoint_dir.mkdir(exist_ok=True)

        self.checkpoint_dir = checkpoint_dir

    @mp_tools.rank_zero_only
    def setup_visualizer(self):
        # visualizer
        visualizer = SummaryWriter(logdir=str(self.output_dir))

        self.visualizer = visualizer

    def setup_logger(self):
        logger = logging.getLogger(__name__)
        logger.setLevel("INFO")
        logger.addHandler(logging.StreamHandler())
        log_file = self.output_dir / 'worker_{}.log'.format(dist.get_rank())
        logger.addHandler(logging.FileHandler(str(log_file)))

        self.logger = logger

    @mp_tools.rank_zero_only
    def dump_config(self):
        with open(self.output_dir / "config.yaml", 'wt') as f:
            print(self.config, file=f)

    def train_batch(self):
        raise NotImplementedError("train_batch should be implemented.")

    @mp_tools.rank_zero_only
    @paddle.no_grad()
    def valid(self):
        raise NotImplementedError("valid should be implemented.")

    def setup_model(self):
        raise NotImplementedError("setup_model should be implemented.")

    def setup_dataloader(self):
        raise NotImplementedError("setup_dataloader should be implemented.")
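A minimal subclass sketch following the conventions listed in the ExperimentBase docstring above. The toy model, optimizer and data pipeline are placeholders and the import path is an assumption; only the four required methods are filled in.

    import paddle
    from paddle.io import DataLoader, TensorDataset
    # Assumed import path for the class added above.
    from parakeet.training.experiment import ExperimentBase


    class ToyExperiment(ExperimentBase):
        def setup_model(self):
            # placeholder model and optimizer
            self.model = paddle.nn.Linear(8, 1)
            self.optimizer = paddle.optimizer.Adam(
                parameters=self.model.parameters())

        def setup_dataloader(self):
            # placeholder dataset of (feature, target) pairs
            features = paddle.randn([64, 8])
            targets = paddle.randn([64, 1])
            dataset = TensorDataset([features, targets])
            self.train_loader = DataLoader(dataset, batch_size=4)
            self.valid_loader = DataLoader(dataset, batch_size=4)

        def train_batch(self):
            x, y = self.read_batch()
            loss = paddle.nn.functional.mse_loss(self.model(x), y)
            loss.backward()
            self.optimizer.step()
            self.optimizer.clear_grad()

        @paddle.no_grad()
        def valid(self):
            self.logger.info("valid at iteration %d", self.iteration)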
@@ -12,4 +12,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from . import io, layer_tools, scheduler, display
from . import checkpoint, layer_tools, scheduler, display, mp_tools
@@ -14,15 +14,19 @@

import os
import time

import numpy as np
import paddle
from paddle import distributed as dist
from paddle.nn import Layer
from paddle.optimizer import Optimizer

from parakeet.utils import mp_tools

__all__ = ["load_parameters", "save_parameters"]

def _load_latest_checkpoint(checkpoint_dir):
    """Get the iteration number corresponding to the latest saved checkpoint

def _load_latest_checkpoint(checkpoint_dir: str) -> int:
    """Get the iteration number corresponding to the latest saved checkpoint.

    Args:
        checkpoint_dir (str): the directory where checkpoint is saved.
@@ -31,18 +35,17 @@ def _load_latest_checkpoint(checkpoint_dir):
        int: the latest iteration number.
    """
    checkpoint_record = os.path.join(checkpoint_dir, "checkpoint")
    # Create checkpoint index file if not exist.
    if (not os.path.isfile(checkpoint_record)):
        return 0

    # Fetch the latest checkpoint index.
    with open(checkpoint_record, "r") as handle:
    with open(checkpoint_record, "rt") as handle:
        latest_checkpoint = handle.readline().split()[-1]
        iteration = int(latest_checkpoint.split("-")[-1])

    return iteration

def _save_checkpoint(checkpoint_dir, iteration):
def _save_checkpoint(checkpoint_dir: str, iteration: int):
    """Save the iteration number of the latest model to be checkpointed.

    Args:
@@ -54,7 +57,7 @@ def _save_checkpoint(checkpoint_dir, iteration):
    """
    checkpoint_record = os.path.join(checkpoint_dir, "checkpoint")
    # Update the latest checkpoint index.
    with open(checkpoint_record, "w") as handle:
    with open(checkpoint_record, "wt") as handle:
        handle.write("model_checkpoint_path: step-{}".format(iteration))

def load_parameters(model,
@@ -64,8 +67,8 @@ def load_parameters(model,
    """Load a specific model checkpoint from disk.

    Args:
        model (obj): model to load parameters.
        optimizer (obj, optional): optimizer to load states if needed.
        model (Layer): model to load parameters.
        optimizer (Optimizer, optional): optimizer to load states if needed.
            Defaults to None.
        checkpoint_dir (str, optional): the directory where checkpoint is saved.
        checkpoint_path (str, optional): if specified, load the checkpoint
@@ -113,8 +116,8 @@ def save_parameters(checkpoint_dir, iteration, model, optimizer=None):
    Args:
        checkpoint_dir (str): the directory where checkpoint is saved.
        iteration (int): the latest iteration number.
        model (obj): model to be checkpointed.
        optimizer (obj, optional): optimizer to be checkpointed.
        model (Layer): model to be checkpointed.
        optimizer (Optimizer, optional): optimizer to be checkpointed.
            Defaults to None.

    Returns:
@@ -2,6 +2,9 @@ import numpy as np
import matplotlib
from matplotlib import cm, pyplot

__all__ = ["pack_attention_images", "add_attention_plots", "min_max_normalize"]


def pack_attention_images(attention_weights, rotate=False):
    # add a box
    attention_weights = np.pad(attention_weights,
@@ -1,6 +1,9 @@
import numpy as np
from paddle.framework import core

__all__ = ["convert_dtype_to_np_dtype_"]


def convert_dtype_to_np_dtype_(dtype):
    """
    Convert paddle's data type to corresponding numpy data type.
@@ -1,172 +0,0 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import time

import ruamel.yaml
import numpy as np
import paddle.fluid.dygraph as dg
from paddle.fluid.framework import convert_np_dtype_to_dtype_ as convert_np_dtype


def is_main_process():
    local_rank = dg.parallel.Env().local_rank
    return local_rank == 0


def add_yaml_config_to_args(config):
    """ Add args in yaml config to the args parsed by argparse. The argument in
        yaml config will be overwritten by the same argument in argparse if they
        are both valid.

    Args:
        config (args): the args returned by `argparse.ArgumentParser().parse_args()`

    Returns:
        config: the args added yaml config.
    """
    with open(config.config, 'rt') as f:
        yaml_cfg = ruamel.yaml.safe_load(f)
    cfg_vars = vars(config)
    for k, v in yaml_cfg.items():
        if k in cfg_vars and cfg_vars[k] is not None:
            continue
        cfg_vars[k] = v
    return config


def _load_latest_checkpoint(checkpoint_dir):
    """Get the iteration number corresponding to the latest saved checkpoint

    Args:
        checkpoint_dir (str): the directory where checkpoint is saved.

    Returns:
        int: the latest iteration number.
    """
    checkpoint_record = os.path.join(checkpoint_dir, "checkpoint")
    # Create checkpoint index file if not exist.
    if (not os.path.isfile(checkpoint_record)):
        return 0

    # Fetch the latest checkpoint index.
    with open(checkpoint_record, "r") as handle:
        latest_checkpoint = handle.readline().split()[-1]
        iteration = int(latest_checkpoint.split("-")[-1])

    return iteration


def _save_checkpoint(checkpoint_dir, iteration):
    """Save the iteration number of the latest model to be checkpointed.

    Args:
        checkpoint_dir (str): the directory where checkpoint is saved.
        iteration (int): the latest iteration number.

    Returns:
        None
    """
    checkpoint_record = os.path.join(checkpoint_dir, "checkpoint")
    # Update the latest checkpoint index.
    with open(checkpoint_record, "w") as handle:
        handle.write("model_checkpoint_path: step-{}".format(iteration))


def load_parameters(model,
                    optimizer=None,
                    checkpoint_dir=None,
                    iteration=None,
                    checkpoint_path=None):
    """Load a specific model checkpoint from disk.

    Args:
        model (obj): model to load parameters.
        optimizer (obj, optional): optimizer to load states if needed.
            Defaults to None.
        checkpoint_dir (str, optional): the directory where checkpoint is saved.
        iteration (int, optional): if specified, load the specific checkpoint,
            if not specified, load the latest one. Defaults to None.
        checkpoint_path (str, optional): if specified, load the checkpoint
            stored in the checkpoint_path and the argument 'checkpoint_dir' will
            be ignored. Defaults to None.

    Returns:
        iteration (int): number of iterations that the loaded checkpoint has
            been trained.
    """
    if checkpoint_path is not None:
        iteration = int(os.path.basename(checkpoint_path).split("-")[-1])
    elif checkpoint_dir is not None:
        if iteration is None:
            iteration = _load_latest_checkpoint(checkpoint_dir)
        if iteration == 0:
            return iteration
        checkpoint_path = os.path.join(checkpoint_dir,
                                       "step-{}".format(iteration))
    else:
        raise ValueError(
            "At least one of 'checkpoint_dir' and 'checkpoint_path' should be specified!"
        )

    local_rank = dg.parallel.Env().local_rank
    model_dict, optimizer_dict = dg.load_dygraph(checkpoint_path)

    state_dict = model.state_dict()

    # cast to desired data type, for mixed-precision training/inference.
    for k, v in model_dict.items():
        if k in state_dict and convert_np_dtype(v.dtype) != state_dict[
                k].dtype:
            model_dict[k] = v.astype(state_dict[k].numpy().dtype)

    model.set_state_dict(model_dict)

    print("[checkpoint] Rank {}: loaded model from {}.pdparams".format(
        local_rank, checkpoint_path))

    if optimizer and optimizer_dict:
        optimizer.set_state_dict(optimizer_dict)
        print("[checkpoint] Rank {}: loaded optimizer state from {}.pdopt".
              format(local_rank, checkpoint_path))

    return iteration


def save_parameters(checkpoint_dir, iteration, model, optimizer=None):
    """Checkpoint the latest trained model parameters.

    Args:
        checkpoint_dir (str): the directory where checkpoint is saved.
        iteration (int): the latest iteration number.
        model (obj): model to be checkpointed.
        optimizer (obj, optional): optimizer to be checkpointed.
            Defaults to None.

    Returns:
        None
    """
    checkpoint_path = os.path.join(checkpoint_dir, "step-{}".format(iteration))
    model_dict = model.state_dict()
    dg.save_dygraph(model_dict, checkpoint_path)
    print("[checkpoint] Saved model to {}.pdparams".format(checkpoint_path))

    if optimizer:
        opt_dict = optimizer.state_dict()
        dg.save_dygraph(opt_dict, checkpoint_path)
        print("[checkpoint] Saved optimzier state to {}.pdopt".format(
            checkpoint_path))

    _save_checkpoint(checkpoint_dir, iteration)
@@ -15,6 +15,8 @@
import numpy as np
from paddle import nn

__all__ = ["summary","gradient_norm", "freeze", "unfreeze"]


def summary(layer: nn.Layer):
    num_params = num_elements = 0
@@ -2,6 +2,9 @@ import paddle
from paddle import distributed as dist
from functools import wraps

__all__ = ["rank_zero_only"]


def rank_zero_only(func):
    local_rank = dist.get_rank()
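A small usage sketch of the `rank_zero_only` decorator above. Its behavior is inferred from how this commit uses it (the `@mp_tools.rank_zero_only` markers in ExperimentBase): the wrapped function is assumed to run only on the rank-0 trainer, and the import path is an assumption.

    from paddle import distributed as dist
    # Assumed import path for the decorator defined above.
    from parakeet.utils.mp_tools import rank_zero_only

    @rank_zero_only
    def log_once(message):
        # executed on the rank-0 process only
        print("[rank {}] {}".format(dist.get_rank(), message))

    log_once("saving checkpoint")  # prints on rank 0, a no-op elsewhere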
@@ -1,5 +1,8 @@
import math

__all__ = ["SchedulerBase", "Constant", "PieceWise", "StepWise"]


class SchedulerBase(object):
    def __call__(self, step):
        raise NotImplementedError("You should implement the __call__ method.")
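A hedged sketch of a scheduler that follows the `SchedulerBase` protocol above. `Constant`, `PieceWise` and `StepWise` are declared in `__all__` but their constructors are not shown in this hunk, so this toy warmup scheduler is purely illustrative, and the import path is an assumption.

    # Assumed import path for the base class defined above.
    from parakeet.utils.scheduler import SchedulerBase


    class LinearWarmup(SchedulerBase):
        """Toy scheduler: ramp linearly from 0 to `peak` over `warmup_steps`."""

        def __init__(self, peak, warmup_steps):
            self.peak = peak
            self.warmup_steps = warmup_steps

        def __call__(self, step):
            return self.peak * min(1.0, step / self.warmup_steps)


    scheduler = LinearWarmup(peak=1e-3, warmup_steps=4000)
    print(scheduler(2000))  # 0.0005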