use paddle's profiler

parent 8dbcc9bccb
commit 042e02d242
@@ -171,8 +171,9 @@ def train_sp(args, config):
         updater,
         stop_trigger=(10, "iteration"),  # PROFILING
         out=output_dir, )
-    with paddle.fluid.profiler.cuda_profiler(
-            str(output_dir / "profiler.log")) as prof:
+    with paddle.fluid.profiler.profiler('All', 'total',
+            str(output_dir / "profiler.log"),
+            'Default') as prof:
         trainer.run()
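For context, the change above swaps the CUDA-only cuda_profiler for Paddle's general profiler context manager, which records both CPU and GPU ops and writes a sorted summary when the block exits. Below is a minimal, self-contained sketch of the same pattern, assuming an older Paddle release that still ships paddle.fluid.profiler; run_one_step and the log path are placeholders, not code from this commit.

    import paddle
    import paddle.fluid.profiler as profiler

    def run_one_step():
        # placeholder workload standing in for one trainer iteration
        x = paddle.randn([32, 128])
        w = paddle.randn([128, 128])
        return paddle.matmul(x, w).mean()

    # state='All' covers CPU and GPU, sorted_key='total' orders ops by total
    # time, and the report is written to profiler.log when the block exits.
    with profiler.profiler('All', 'total', 'profiler.log', 'Default'):
        for _ in range(10):  # mirrors stop_trigger=(10, "iteration") above
            run_one_step()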
@@ -48,7 +48,8 @@ def read_hdf5(filename: Union[Path, str], dataset_name: str) -> Any:
             f"There is no such a data in hdf5 file. ({dataset_name})")
         sys.exit(1)

-    hdf5_data = hdf5_file[dataset_name][()]  # a special syntax of h5py
+    # [()]: a special syntax of h5py to get the dataset as-is
+    hdf5_data = hdf5_file[dataset_name][()]
     hdf5_file.close()

     return hdf5_data
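For reference, here is a self-contained sketch of a read_hdf5 helper in the shape this hunk suggests; only the lines visible in the diff are known to match the real function, and the h5py-based body and the logging call are assumptions.

    import logging
    import sys
    from pathlib import Path
    from typing import Any, Union

    import h5py

    def read_hdf5(filename: Union[Path, str], dataset_name: str) -> Any:
        hdf5_file = h5py.File(filename, "r")
        if dataset_name not in hdf5_file:
            logging.error(
                f"There is no such a data in hdf5 file. ({dataset_name})")
            sys.exit(1)
        # [()]: a special syntax of h5py to get the dataset as-is
        # (a NumPy array, or a scalar for 0-d datasets)
        hdf5_data = hdf5_file[dataset_name][()]
        hdf5_file.close()
        return hdf5_data

Reading a stored statistic then returns the array directly, e.g. mean = read_hdf5("stats.h5", "mean"), which is what the relocated comment documents.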