discard tests/ temporarily for outdated code
parent 2c952fbd70
commit c866bb0b57
@@ -1,101 +0,0 @@
import unittest
import numpy as np
import paddle
paddle.set_default_dtype("float64")
paddle.disable_static(paddle.CPUPlace())

from parakeet.modules import attention as attn


class TestScaledDotProductAttention(unittest.TestCase):
    def test_without_mask(self):
        x = paddle.randn([4, 16, 8])
        context_vector, attention_weights = attn.scaled_dot_product_attention(x, x, x)
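        # A shape sketch, assuming the conventional definition
        # softmax(q @ k.T / sqrt(d)) @ v: for self-attention the weights are
        # [batch, T, T] and the context vector keeps the query's shape.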
        assert(list(context_vector.shape) == [4, 16, 8])
        assert(list(attention_weights.shape) == [4, 16, 16])

    def test_with_mask(self):
        x = paddle.randn([4, 16, 8])
        mask = paddle.fluid.layers.sequence_mask(
            paddle.to_tensor([16, 15, 13, 14]), dtype=x.dtype)
        mask = mask.unsqueeze(1)  # unsqueeze for the decoder time steps
        context_vector, attention_weights = attn.scaled_dot_product_attention(x, x, x, mask)
        assert(list(context_vector.shape) == [4, 16, 8])
        assert(list(attention_weights.shape) == [4, 16, 16])

    def test_4d(self):
        x = paddle.randn([4, 6, 16, 8])
        context_vector, attention_weights = attn.scaled_dot_product_attention(x, x, x)
        assert(list(context_vector.shape) == [4, 6, 16, 8])
        assert(list(attention_weights.shape) == [4, 6, 16, 16])


class TestMonoheadAttention(unittest.TestCase):
    def test_io(self):
        net = attn.MonoheadAttention(6, 0.1)
        q = paddle.randn([4, 18, 6])
        k = paddle.randn([4, 12, 6])
        v = paddle.randn([4, 12, 6])
        mask = paddle.fluid.layers.sequence_mask(
            paddle.to_tensor([12, 10, 8, 9]), dtype=q.dtype)
        mask = paddle.unsqueeze(mask, 1)  # unsqueeze for time_steps_q
        context_vector, attn_weights = net(q, k, v, mask)
        self.assertTupleEqual(context_vector.numpy().shape, (4, 18, 6))
        self.assertTupleEqual(attn_weights.numpy().shape, (4, 18, 12))


class TestDropHead(unittest.TestCase):
    def test_drop(self):
        x = paddle.randn([4, 6, 16, 8])
        out = attn.drop_head(x, 2, training=True)
        # drop 2 heads out of the 6 at every position
        np.testing.assert_allclose(np.sum(out.numpy() == 0., axis=1), 2)
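        # Only the count of zeroed head slices is checked here; whether the
        # surviving heads are rescaled (dropout-style) is not asserted.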

    def test_drop_all(self):
        x = paddle.randn([4, 6, 16, 8])
        out = attn.drop_head(x, 6, training=True)
        np.testing.assert_allclose(np.sum(out.numpy()), 0)

    def test_eval(self):
        x = paddle.randn([4, 6, 16, 8])
        out = attn.drop_head(x, 6, training=False)
        self.assertIs(x, out)


class TestMultiheadAttention(unittest.TestCase):
    def __init__(self, methodName="test_io", same_qk=True):
        super(TestMultiheadAttention, self).__init__(methodName)
        self.same_qk = same_qk
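        # Overriding __init__ parameterizes the case (same_qk), so it must
        # be registered explicitly via load_tests below rather than relying
        # on automatic test discovery.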

    def setUp(self):
        if self.same_qk:
            net = attn.MultiheadAttention(64, 8, dropout=0.3)
        else:
            net = attn.MultiheadAttention(64, 8, k_dim=12, v_dim=6)
        self.net = net

    def test_io(self):
        q = paddle.randn([4, 12, 64])
        mask = paddle.fluid.layers.sequence_mask(
            paddle.to_tensor([12, 10, 8, 9]), dtype=q.dtype)
        mask = paddle.unsqueeze(mask, 1)  # unsqueeze for time_steps_q
        context_vector, attention_weights = self.net(q, q, q, mask)
        self.assertTupleEqual(context_vector.numpy().shape, (4, 12, 64))
        self.assertTupleEqual(attention_weights.numpy().shape, (4, 8, 12, 12))


def load_tests(loader, standard_tests, pattern):
    suite = unittest.TestSuite()
    suite.addTest(TestScaledDotProductAttention("test_without_mask"))
    suite.addTest(TestScaledDotProductAttention("test_with_mask"))
    suite.addTest(TestScaledDotProductAttention("test_4d"))

    suite.addTest(TestDropHead("test_drop"))
    suite.addTest(TestDropHead("test_drop_all"))
    suite.addTest(TestDropHead("test_eval"))

    suite.addTest(TestMonoheadAttention("test_io"))

    suite.addTest(TestMultiheadAttention("test_io", same_qk=True))
    suite.addTest(TestMultiheadAttention("test_io", same_qk=False))

    return suite

@@ -1,34 +0,0 @@
import unittest
import paddle
paddle.set_default_dtype("float64")
paddle.disable_static(paddle.CPUPlace())
from parakeet.modules import cbhg


class TestHighway(unittest.TestCase):
    def test_io(self):
        net = cbhg.Highway(4)
        x = paddle.randn([2, 12, 4])
        y = net(x)
        self.assertTupleEqual(y.numpy().shape, (2, 12, 4))


class TestCBHG(unittest.TestCase):
    def __init__(self, methodName="runTest"):
        super(TestCBHG, self).__init__(methodName)

    def test_io(self):
        self.net = cbhg.CBHG(64, 32, 16,
                             projection_channels=[64, 128],
                             num_highways=4, highway_features=128,
                             gru_features=64)
        x = paddle.randn([4, 64, 32])
        y = self.net(x)
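        # The output is (batch, time, 2 * gru_features): presumably the
        # final bidirectional GRU concatenates both directions
        # (64 * 2 = 128); the assertion below only checks the shape.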
        self.assertTupleEqual(y.numpy().shape, (4, 32, 128))


def load_tests(loader, standard_tests, pattern):
    suite = unittest.TestSuite()

    suite.addTest(TestHighway("test_io"))
    suite.addTest(TestCBHG("test_io"))
    return suite

@@ -1,43 +0,0 @@
import unittest
import numpy as np

import paddle
paddle.set_default_dtype("float64")
paddle.disable_static(paddle.CPUPlace())

from parakeet.models import clarinet
from parakeet.modules import stft


class TestParallelWaveNet(unittest.TestCase):
    def test_io(self):
        net = clarinet.ParallelWaveNet([8, 8, 8], [1, 1, 1], 16, 12, 2)
        x = paddle.randn([4, 6073])
        condition = paddle.randn([4, 12, 6073])
        z, out_mu, out_log_std = net(x, condition)
        self.assertTupleEqual(z.numpy().shape, (4, 6073))
        self.assertTupleEqual(out_mu.numpy().shape, (4, 6073))
        self.assertTupleEqual(out_log_std.numpy().shape, (4, 6073))


class TestClariNet(unittest.TestCase):
    def setUp(self):
        encoder = clarinet.UpsampleNet([2, 2])
        teacher = clarinet.WaveNet(8, 3, 16, 3, 12, 2, "mog", -9.0)
        student = clarinet.ParallelWaveNet([8, 8, 8, 8, 8, 8], [1, 1, 1, 1, 1, 1], 16, 12, 2)
        stft_module = stft.STFT(16, 4, 8)
        net = clarinet.Clarinet(encoder, teacher, student, stft_module, -6.0, lmd=4)
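        # ClariNet pairs a WaveNet teacher with a ParallelWaveNet student,
        # with the STFT module available for a spectral auxiliary loss;
        # presumably lmd weights that term. The tests below only exercise
        # the forward pass and synthesis end to end.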
        print("context size is: ", teacher.context_size)
        self.net = net

    def test_io(self):
        audio = paddle.randn([4, 1366])
        mel = paddle.randn([4, 12, 512])  # 512 frames * 4x upsampling = 2048 samples
        audio_start = paddle.zeros([4], dtype="int64")
        loss = self.net(audio, mel, audio_start, clip_kl=True)
        loss["loss"].numpy()

    def test_synthesis(self):
        mel = paddle.randn([4, 12, 512])  # 512 frames * 4x upsampling = 2048 samples
        out = self.net.synthesis(mel)
        self.assertTupleEqual(out.numpy().shape, (4, 2048))

@@ -1,33 +0,0 @@
import unittest
import paddle
from paddle import nn
paddle.disable_static(paddle.CPUPlace())
paddle.set_default_dtype("float64")

from parakeet.modules import connections as conn


class TestPreLayerNormWrapper(unittest.TestCase):
    def test_io(self):
        net = nn.Linear(8, 8)
        net = conn.PreLayerNormWrapper(net, 8)
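        # A pre-norm wrapper is expected to compute x + f(LayerNorm(x)),
        # while the post-norm variant below applies LayerNorm(x + f(x));
        # both preserve the input shape (inferred from the names, since
        # these tests only check shapes).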
        x = paddle.randn([4, 8])
        y = net(x)
        self.assertTupleEqual(x.numpy().shape, y.numpy().shape)


class TestPostLayerNormWrapper(unittest.TestCase):
    def test_io(self):
        net = nn.Linear(8, 8)
        net = conn.PostLayerNormWrapper(net, 8)
        x = paddle.randn([4, 8])
        y = net(x)
        self.assertTupleEqual(x.numpy().shape, y.numpy().shape)


class TestResidualWrapper(unittest.TestCase):
    def test_io(self):
        net = nn.Linear(8, 8)
        net = conn.ResidualWrapper(net)
        x = paddle.randn([4, 8])
        y = net(x)
        self.assertTupleEqual(x.numpy().shape, y.numpy().shape)

@@ -1,67 +0,0 @@
import paddle
paddle.set_default_dtype("float64")
paddle.disable_static(paddle.CPUPlace())
import unittest
import numpy as np

from parakeet.modules import conv


class TestConv1dCell(unittest.TestCase):
    def setUp(self):
        self.net = conv.Conv1dCell(4, 6, 5, dilation=2)

    def forward_incremental(self, x):
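        # Streaming sketch: feed the sequence one timestep at a time through
        # add_input and restack the outputs; for a causal convolution cell
        # this must reproduce the batched forward pass exactly (checked in
        # test_equality below).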
        outs = []
        self.net.start_sequence()
        with paddle.no_grad():
            for i in range(x.shape[-1]):
                xt = x[:, :, i]
                yt = self.net.add_input(xt)
                outs.append(yt)
        y2 = paddle.stack(outs, axis=-1)
        return y2

    def test_equality(self):
        x = paddle.randn([2, 4, 16])
        y1 = self.net(x)

        self.net.eval()
        y2 = self.forward_incremental(x)

        np.testing.assert_allclose(y2.numpy(), y1.numpy())


class TestConv1dBatchNorm(unittest.TestCase):
    def __init__(self, methodName="runTest", causal=False, channel_last=False):
        super(TestConv1dBatchNorm, self).__init__(methodName)
        self.causal = causal
        self.channel_last = channel_last

    def setUp(self):
        k = 5
        padding = (k - 1, 0) if self.causal else ((k - 1) // 2, k // 2)
        self.net = conv.Conv1dBatchNorm(4, 6, (k,), 1, padding=padding,
                                        data_format="NLC" if self.channel_last else "NCL")

    def test_input_output(self):
        x = paddle.randn([4, 16, 4]) if self.channel_last else paddle.randn([4, 4, 16])
        out = self.net(x)
        out_np = out.numpy()
        if self.channel_last:
            self.assertTupleEqual(out_np.shape, (4, 16, 6))
        else:
            self.assertTupleEqual(out_np.shape, (4, 6, 16))

    def runTest(self):
        self.test_input_output()


def load_tests(loader, standard_tests, pattern):
    suite = unittest.TestSuite()
    suite.addTest(TestConv1dBatchNorm("runTest", True, True))
    suite.addTest(TestConv1dBatchNorm("runTest", False, False))
    suite.addTest(TestConv1dBatchNorm("runTest", True, False))
    suite.addTest(TestConv1dBatchNorm("runTest", False, True))
    suite.addTest(TestConv1dCell("test_equality"))

    return suite

@@ -1,122 +0,0 @@
import unittest
import numpy as np
import paddle
from paddle import io
from parakeet import data


class MyDataset(io.Dataset):
    def __init__(self, size):
        self._data = np.random.randn(size, 6)

    def __getitem__(self, i):
        return self._data[i]

    def __len__(self):
        return self._data.shape[0]


class TestTransformDataset(unittest.TestCase):
    def test(self):
        dataset = MyDataset(20)
        dataset = data.TransformDataset(dataset, lambda x: np.abs(x))
        dataloader = io.DataLoader(dataset, batch_size=4, shuffle=True, num_workers=1)
        print("TransformDataset")
        for batch, in dataloader:
            print(type(batch), batch.dtype, batch.shape)


class TestChainDataset(unittest.TestCase):
    def test(self):
        dataset1 = MyDataset(20)
        dataset2 = MyDataset(40)
        dataset = data.ChainDataset(dataset1, dataset2)
        dataloader = io.DataLoader(dataset, batch_size=4, shuffle=True, num_workers=1)
        print("ChainDataset")
        for batch, in dataloader:
            print(type(batch), batch.dtype, batch.shape)


class TestTupleDataset(unittest.TestCase):
    def test(self):
        dataset1 = MyDataset(20)
        dataset2 = MyDataset(20)
        dataset = data.TupleDataset(dataset1, dataset2)
        dataloader = io.DataLoader(dataset, batch_size=4, shuffle=True, num_workers=1)
        print("TupleDataset")
        for field1, field2 in dataloader:
            print(type(field1), field1.dtype, field1.shape)
            print(type(field2), field2.dtype, field2.shape)


class TestDictDataset(unittest.TestCase):
    def test(self):
        dataset1 = MyDataset(20)
        dataset2 = MyDataset(20)
        dataset = data.DictDataset(field1=dataset1, field2=dataset2)
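        # DictDataset examples come out as dicts, which the default batching
        # cannot stack directly, so this collate function converts each
        # example's values to a tuple and reuses Paddle's default collation.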
        def collate_fn(examples):
            examples_tuples = []
            for example in examples:
                examples_tuples.append(example.values())
            return paddle.fluid.dataloader.dataloader_iter.default_collate_fn(examples_tuples)

        dataloader = io.DataLoader(dataset, batch_size=4, shuffle=True, num_workers=1, collate_fn=collate_fn)
        print("DictDataset")
        for field1, field2 in dataloader:
            print(type(field1), field1.dtype, field1.shape)
            print(type(field2), field2.dtype, field2.shape)


class TestSliceDataset(unittest.TestCase):
    def test(self):
        dataset = MyDataset(40)
        dataset = data.SliceDataset(dataset, 0, 20)
        dataloader = io.DataLoader(dataset, batch_size=4, shuffle=True, num_workers=1)
        print("SliceDataset")
        for batch, in dataloader:
            print(type(batch), batch.dtype, batch.shape)


class TestSplit(unittest.TestCase):
    def test(self):
        dataset = MyDataset(40)
        train, valid = data.split(dataset, 10)
        dataloader1 = io.DataLoader(train, batch_size=4, shuffle=True, num_workers=1)
        dataloader2 = io.DataLoader(valid, batch_size=4, shuffle=True, num_workers=1)
        print("First Dataset")
        for batch, in dataloader1:
            print(type(batch), batch.dtype, batch.shape)

        print("Second Dataset")
        for batch, in dataloader2:
            print(type(batch), batch.dtype, batch.shape)


class TestSubsetDataset(unittest.TestCase):
    def test(self):
        dataset = MyDataset(40)
        indices = np.random.choice(np.arange(40), [20], replace=False).tolist()
        dataset = data.SubsetDataset(dataset, indices)
        dataloader = io.DataLoader(dataset, batch_size=4, shuffle=True, num_workers=1)
        print("SubsetDataset")
        for batch, in dataloader:
            print(type(batch), batch.dtype, batch.shape)


class TestFilterDataset(unittest.TestCase):
    def test(self):
        dataset = MyDataset(40)
        dataset = data.FilterDataset(dataset, lambda x: np.mean(x) > 0.3)
        dataloader = io.DataLoader(dataset, batch_size=4, shuffle=True, num_workers=1)
        print("FilterDataset")
        for batch, in dataloader:
            print(type(batch), batch.dtype, batch.shape)


class TestCacheDataset(unittest.TestCase):
    def test(self):
        dataset = MyDataset(40)
        dataset = data.CacheDataset(dataset)
        dataloader = io.DataLoader(dataset, batch_size=4, shuffle=True, num_workers=1)
        print("CacheDataset")
        for batch, in dataloader:
            print(type(batch), batch.dtype, batch.shape)

@@ -1,107 +0,0 @@
import numpy as np
import unittest
import paddle
paddle.set_default_dtype("float64")
paddle.disable_static(paddle.CPUPlace())

from parakeet.models import deepvoice3 as dv3


class TestConvBlock(unittest.TestCase):
    def test_io_causal(self):
        net = dv3.ConvBlock(6, 5, True, True, 8, 0.9)
        x = paddle.randn([4, 32, 6])
        condition = paddle.randn([4, 8])
        # TODO(chenfeiyu): to report an issue on default data type
        padding = paddle.zeros([4, 4, 6], dtype=x.dtype)
        y = net.forward(x, condition, padding)
        self.assertTupleEqual(y.numpy().shape, (4, 32, 6))

    def test_io_non_causal(self):
        net = dv3.ConvBlock(6, 5, False, True, 8, 0.9)
        x = paddle.randn([4, 32, 6])
        condition = paddle.randn([4, 8])
        y = net.forward(x, condition)
        self.assertTupleEqual(y.numpy().shape, (4, 32, 6))


class TestAffineBlock1(unittest.TestCase):
    def test_io(self):
        net = dv3.AffineBlock1(6, 16, True, 8)
        x = paddle.randn([4, 32, 6])
        condition = paddle.randn([4, 8])
        y = net(x, condition)
        self.assertTupleEqual(y.numpy().shape, (4, 32, 16))


class TestAffineBlock2(unittest.TestCase):
    def test_io(self):
        net = dv3.AffineBlock2(6, 16, True, 8)
        x = paddle.randn([4, 32, 6])
        condition = paddle.randn([4, 8])
        y = net(x, condition)
        self.assertTupleEqual(y.numpy().shape, (4, 32, 16))


class TestEncoder(unittest.TestCase):
    def test_io(self):
        net = dv3.Encoder(5, 8, 16, 5, True, 6)
        x = paddle.randn([4, 32, 8])
        condition = paddle.randn([4, 6])
        keys, values = net(x, condition)
        self.assertTupleEqual(keys.numpy().shape, (4, 32, 8))
        self.assertTupleEqual(values.numpy().shape, (4, 32, 8))


class TestAttentionBlock(unittest.TestCase):
    def test_io(self):
        net = dv3.AttentionBlock(16, 6, has_bias=True, bias_dim=8)
        q = paddle.randn([4, 32, 6])
        k = paddle.randn([4, 24, 6])
        v = paddle.randn([4, 24, 6])
        lengths = paddle.to_tensor([24, 20, 19, 23], dtype="int64")
        condition = paddle.randn([4, 8])
        context_vector, attention_weight = net(q, k, v, lengths, condition, 0)
        self.assertTupleEqual(context_vector.numpy().shape, (4, 32, 6))
        self.assertTupleEqual(attention_weight.numpy().shape, (4, 32, 24))

    def test_io_with_previous_attn(self):
        net = dv3.AttentionBlock(16, 6, has_bias=True, bias_dim=8)
        q = paddle.randn([4, 32, 6])
        k = paddle.randn([4, 24, 6])
        v = paddle.randn([4, 24, 6])
        lengths = paddle.to_tensor([24, 20, 19, 23], dtype="int64")
        condition = paddle.randn([4, 8])
        prev_attn_weight = paddle.randn([4, 32, 16])

        context_vector, attention_weight = net(
            q, k, v, lengths, condition, 0,
            force_monotonic=True, prev_coeffs=prev_attn_weight, window=(0, 4))
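        # With force_monotonic the attended position can only move forward
        # relative to the previous alignment (prev_coeffs), and window=(0, 4)
        # presumably limits how far ahead it may jump; shapes are unchanged.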
        self.assertTupleEqual(context_vector.numpy().shape, (4, 32, 6))
        self.assertTupleEqual(attention_weight.numpy().shape, (4, 32, 24))


class TestDecoder(unittest.TestCase):
    def test_io(self):
        net = dv3.Decoder(8, 4, [4, 12], 5, 3, 16, 1.0, 1.45, True, 6)
        x = paddle.randn([4, 32, 8])
        k = paddle.randn([4, 24, 12])  # the prenet's last size should equal k's feature size
        v = paddle.randn([4, 24, 12])
        lengths = paddle.to_tensor([24, 18, 19, 22])
        condition = paddle.randn([4, 6])
        decoded, hidden, attentions, final_state = net(x, k, v, lengths, 0, condition)
        self.assertTupleEqual(decoded.numpy().shape, (4, 32, 4 * 8))
        self.assertTupleEqual(hidden.numpy().shape, (4, 32, 12))
        self.assertEqual(len(attentions), 5)
        self.assertTupleEqual(attentions[0].numpy().shape, (4, 32, 24))
        self.assertEqual(len(final_state), 5)
        self.assertTupleEqual(final_state[0].numpy().shape, (4, 2, 12))


class TestPostNet(unittest.TestCase):
    def test_io(self):
        net = dv3.PostNet(3, 8, 16, 3, 12, 4, True, 6)
        x = paddle.randn([4, 32, 8])
        condition = paddle.randn([4, 6])
        y = net(x, condition)
        self.assertTupleEqual(y.numpy().shape, (4, 32 * 4, 12))

@@ -1,19 +0,0 @@
import unittest
import numpy as np

import paddle
paddle.set_default_dtype("float64")
paddle.disable_static(paddle.CPUPlace())

from parakeet.modules import geometry as geo


class TestShuffleDim(unittest.TestCase):
    def test_perm(self):
        x = paddle.randn([2, 3, 4, 6])
        y = geo.shuffle_dim(x, 2, [3, 2, 1, 0])
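        # The permutation [3, 2, 1, 0] reverses axis 2, so a slice of y
        # along that axis should equal the matching slice of x reversed.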
        np.testing.assert_allclose(x.numpy()[0, 0, :, 0], y.numpy()[0, 0, ::-1, 0])

    def test_random_perm(self):
        x = paddle.randn([2, 3, 4, 6])
        y = geo.shuffle_dim(x, 2)
        np.testing.assert_allclose(x.numpy().sum(2), y.numpy().sum(2))

@@ -1,33 +0,0 @@
import unittest
import paddle
paddle.set_device("cpu")
import numpy as np

from parakeet.modules.losses import weighted_mean, masked_l1_loss, masked_softmax_with_cross_entropy


class TestWeightedMean(unittest.TestCase):
    def test(self):
        x = paddle.arange(0, 10, dtype="float64").unsqueeze(-1).broadcast_to([10, 3])
        mask = (paddle.arange(0, 10, dtype="float64") > 4).unsqueeze(-1)
        loss = weighted_mean(x, mask)
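        # Worked check: the mask keeps rows 5..9, whose values are
        # 5, 6, 7, 8, 9; their mean is 7, which is what is asserted.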
        self.assertAlmostEqual(loss.numpy()[0], 7)


class TestMaskedL1Loss(unittest.TestCase):
    def test(self):
        x = paddle.arange(0, 10, dtype="float64").unsqueeze(-1).broadcast_to([10, 3])
        y = paddle.zeros_like(x)
        mask = (paddle.arange(0, 10, dtype="float64") > 4).unsqueeze(-1)
        loss = masked_l1_loss(x, y, mask)
        print(loss)
        self.assertAlmostEqual(loss.numpy()[0], 7)


class TestMaskedCrossEntropy(unittest.TestCase):
    def test(self):
        x = paddle.randn([3, 30, 8], dtype="float64")
        y = paddle.randint(0, 8, [3, 30], dtype="int64").unsqueeze(-1)  # note: labels carry a trailing axis of size 1
        mask = paddle.fluid.layers.sequence_mask(
            paddle.to_tensor([30, 18, 27]), dtype="int64").unsqueeze(-1)
        loss = masked_softmax_with_cross_entropy(x, y, mask)
        print(loss)

@@ -1,54 +0,0 @@
import unittest
import numpy as np
import paddle
paddle.set_default_dtype("float64")

from parakeet.modules import masking


def sequence_mask(lengths, max_length=None, dtype="bool"):
    max_length = max_length or np.max(lengths)
    ids = np.arange(max_length)
    return (ids < np.expand_dims(lengths, -1)).astype(dtype)
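
# A quick worked example of the reference helper above:
# sequence_mask([2, 3], max_length=4) -> [[1, 1, 0, 0],
#                                         [1, 1, 1, 0]]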

def future_mask(lengths, max_length=None, dtype="bool"):
    max_length = max_length or np.max(lengths)
    # lower-triangular ones: position i may attend to positions <= i
    return np.tril(np.ones((max_length, max_length))).astype(dtype)


class TestIDMask(unittest.TestCase):
    def test(self):
        ids = paddle.to_tensor(
            [[1, 2, 3, 0, 0, 0],
             [2, 4, 5, 6, 0, 0],
             [7, 8, 9, 0, 0, 0]]
        )
        mask = masking.id_mask(ids)
        self.assertTupleEqual(mask.numpy().shape, ids.numpy().shape)
        print(mask.numpy())


class TestFeatureMask(unittest.TestCase):
    def test(self):
        features = np.random.randn(3, 16, 8)
        lengths = [16, 14, 12]
        for i, length in enumerate(lengths):
            features[i, length:, :] = 0

        feature_tensor = paddle.to_tensor(features)
        mask = masking.feature_mask(feature_tensor, -1)
        self.assertTupleEqual(mask.numpy().shape, (3, 16, 1))
        print(mask.numpy().squeeze())


class TestCombineMask(unittest.TestCase):
    def test_bool_mask(self):
        lengths = np.array([12, 8, 9, 10])
        padding_mask = sequence_mask(lengths, dtype="bool")
        no_future_mask = future_mask(lengths, dtype="bool")
        combined_mask1 = np.expand_dims(padding_mask, 1) * no_future_mask

        print(paddle.to_tensor(padding_mask).dtype)
        print(paddle.to_tensor(no_future_mask).dtype)
        combined_mask2 = masking.combine_mask(
            paddle.to_tensor(padding_mask).unsqueeze(1), paddle.to_tensor(no_future_mask)
        )
        np.testing.assert_allclose(combined_mask2.numpy(), combined_mask1)

@@ -1,64 +0,0 @@
import unittest
import numpy as np
import paddle

from parakeet.modules import positional_encoding as pe


def positional_encoding(start_index, length, size, dtype="float32"):
    if size % 2 != 0:
        raise ValueError("size should be divisible by 2")
    channel = np.arange(0, size, 2, dtype=dtype)
    index = np.arange(start_index, start_index + length, 1, dtype=dtype)
    p = np.expand_dims(index, -1) / (10000 ** (channel / float(size)))
    encodings = np.concatenate([np.sin(p), np.cos(p)], axis=-1)
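    # i.e. PE(pos, i) = sin(pos / 10000^(2i/size)) on the first half of the
    # channels and the matching cos on the second half; sin and cos are
    # concatenated blockwise rather than interleaved, matching (per the
    # equality test below) parakeet's own implementation.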
    return encodings


def scalable_positional_encoding(start_index, length, size, omega):
    dtype = omega.dtype
    index = np.arange(start_index, start_index + length, 1, dtype=dtype)
    channel = np.arange(0, size, 2, dtype=dtype)

    p = np.reshape(omega, omega.shape + (1, 1)) \
        * np.expand_dims(index, -1) \
        / (10000 ** (channel / float(size)))

    encodings = np.concatenate([np.sin(p), np.cos(p)], axis=-1)
    return encodings


class TestPositionEncoding(unittest.TestCase):
    def __init__(self, start=0, length=20, size=16, dtype="float64"):
        super(TestPositionEncoding, self).__init__("runTest")
        self.spec = (start, length, size, dtype)

    def test_equality(self):
        start, length, size, dtype = self.spec
        position_embed1 = positional_encoding(start, length, size, dtype)
        position_embed2 = pe.positional_encoding(start, length, size, dtype)
        np.testing.assert_allclose(position_embed2.numpy(), position_embed1)

    def runTest(self):
        paddle.disable_static(paddle.CPUPlace())
        self.test_equality()


class TestScalablePositionEncoding(unittest.TestCase):
    def __init__(self, start=0, length=20, size=16, dtype="float64"):
        super(TestScalablePositionEncoding, self).__init__("runTest")
        self.spec = (start, length, size, dtype)

    def test_equality(self):
        start, length, size, dtype = self.spec
        omega = np.random.uniform(1, 2, size=(4,)).astype(dtype)
        position_embed1 = scalable_positional_encoding(start, length, size, omega)
        position_embed2 = pe.scalable_positional_encoding(start, length, size, paddle.to_tensor(omega))
        np.testing.assert_allclose(position_embed2.numpy(), position_embed1)

    def runTest(self):
        paddle.disable_static(paddle.CPUPlace())
        self.test_equality()


def load_tests(loader, standard_tests, pattern):
    suite = unittest.TestSuite()
    suite.addTest(TestPositionEncoding(0, 20, 16, "float64"))
    suite.addTest(TestScalablePositionEncoding(0, 20, 16))
    return suite

@@ -1,27 +0,0 @@
import unittest
import numpy as np
import librosa
import paddle
paddle.set_default_dtype("float64")
paddle.disable_static(paddle.CPUPlace())

from parakeet.modules import stft


class TestSTFT(unittest.TestCase):
    def test(self):
        path = librosa.util.example("choice")
        wav, sr = librosa.load(path, duration=5)
        wav = wav.astype("float64")

        spec = librosa.stft(wav, n_fft=2048, hop_length=256, win_length=1024)
        mag1 = np.abs(spec)

        wav_in_batch = paddle.unsqueeze(paddle.to_tensor(wav), 0)
        mag2 = stft.STFT(2048, 256, 1024).magnitude(wav_in_batch)
        mag2 = paddle.squeeze(mag2, [0, 2]).numpy()

        print("mag1", mag1)
        print("mag2", mag2)
        # TODO(chenfeiyu): is something wrong? some elements do not match
        # np.testing.assert_allclose(mag2, mag1)
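        # One plausible (unverified) cause of the mismatch: librosa.stft
        # defaults to center=True, reflect-padding the signal by n_fft // 2
        # before framing, so unless the STFT module pads the same way the
        # frames are not aligned; the default window functions may differ too.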

@@ -1,43 +0,0 @@
import unittest
import numpy as np
import paddle
paddle.set_default_dtype("float64")
paddle.disable_static(paddle.CPUPlace())

from parakeet.modules import transformer


class TestPositionwiseFFN(unittest.TestCase):
    def test_io(self):
        net = transformer.PositionwiseFFN(8, 12)
        x = paddle.randn([2, 3, 4, 8])
        y = net(x)
        self.assertTupleEqual(y.numpy().shape, (2, 3, 4, 8))


class TestTransformerEncoderLayer(unittest.TestCase):
    def test_io(self):
        net = transformer.TransformerEncoderLayer(64, 8, 128, 0.5)
        x = paddle.randn([4, 12, 64])
        lengths = paddle.to_tensor([12, 8, 9, 10])
        mask = paddle.fluid.layers.sequence_mask(lengths, dtype=x.dtype)
        y, attn_weights = net(x, mask)

        self.assertTupleEqual(y.numpy().shape, (4, 12, 64))
        self.assertTupleEqual(attn_weights.numpy().shape, (4, 8, 12, 12))
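        # attention weights come back per head as (batch, n_heads, T_q, T_k);
        # for self-attention T_q == T_k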


class TestTransformerDecoderLayer(unittest.TestCase):
    def test_io(self):
        net = transformer.TransformerDecoderLayer(64, 8, 128, 0.5)
        q = paddle.randn([4, 32, 64])
        k = paddle.randn([4, 24, 64])
        v = paddle.randn([4, 24, 64])
        enc_lengths = paddle.to_tensor([24, 18, 20, 22])
        dec_lengths = paddle.to_tensor([32, 28, 30, 31])
        enc_mask = paddle.fluid.layers.sequence_mask(enc_lengths, dtype=k.dtype)
        dec_mask = paddle.fluid.layers.sequence_mask(dec_lengths, dtype=q.dtype)
        y, self_attn_weights, cross_attn_weights = net(q, k, v, enc_mask, dec_mask)

        self.assertTupleEqual(y.numpy().shape, (4, 32, 64))
        self.assertTupleEqual(self_attn_weights.numpy().shape, (4, 8, 32, 32))
        self.assertTupleEqual(cross_attn_weights.numpy().shape, (4, 8, 32, 24))

@@ -1,121 +0,0 @@
import unittest
import numpy as np
import paddle
paddle.set_default_dtype("float64")
paddle.disable_static(paddle.CPUPlace())

from parakeet.models import transformer_tts as tts
from parakeet.modules import masking
from pprint import pprint


class TestMultiheadAttention(unittest.TestCase):
    def test_io_same_qk(self):
        net = tts.MultiheadAttention(64, 8)
        q = paddle.randn([4, 12, 64])
        mask = paddle.fluid.layers.sequence_mask(
            paddle.to_tensor([12, 10, 8, 9]), dtype=q.dtype)
        mask = paddle.unsqueeze(mask, 1)  # unsqueeze for time_steps_q
        context_vector, attention_weights = net(q, q, q, mask, drop_n_heads=2)
        self.assertTupleEqual(context_vector.numpy().shape, (4, 12, 64))
        self.assertTupleEqual(attention_weights.numpy().shape, (4, 8, 12, 12))

    def test_io(self):
        net = tts.MultiheadAttention(64, 8, k_dim=12, v_dim=6)
        q = paddle.randn([4, 12, 64])
        mask = paddle.fluid.layers.sequence_mask(
            paddle.to_tensor([12, 10, 8, 9]), dtype=q.dtype)
        mask = paddle.unsqueeze(mask, 1)  # unsqueeze for time_steps_q
        context_vector, attention_weights = net(q, q, q, mask, drop_n_heads=2)
        self.assertTupleEqual(context_vector.numpy().shape, (4, 12, 64))
        self.assertTupleEqual(attention_weights.numpy().shape, (4, 8, 12, 12))


class TestTransformerEncoderLayer(unittest.TestCase):
    def test_io(self):
        net = tts.TransformerEncoderLayer(64, 8, 128)
        x = paddle.randn([4, 12, 64])
        mask = paddle.fluid.layers.sequence_mask(
            paddle.to_tensor([12, 10, 8, 9]), dtype=x.dtype)
        context_vector, attention_weights = net(x, mask)
        self.assertTupleEqual(context_vector.numpy().shape, (4, 12, 64))
        self.assertTupleEqual(attention_weights.numpy().shape, (4, 8, 12, 12))


class TestTransformerDecoderLayer(unittest.TestCase):
    def test_io(self):
        net = tts.TransformerDecoderLayer(64, 8, 128, 0.5)
        q = paddle.randn([4, 32, 64])
        k = paddle.randn([4, 24, 64])
        v = paddle.randn([4, 24, 64])
        enc_lengths = paddle.to_tensor([24, 18, 20, 22])
        dec_lengths = paddle.to_tensor([32, 28, 30, 31])
        enc_mask = masking.sequence_mask(enc_lengths, dtype=k.dtype)
        dec_padding_mask = masking.sequence_mask(dec_lengths, dtype=q.dtype)
        no_future_mask = masking.future_mask(32, dtype=q.dtype)
        dec_mask = masking.combine_mask(dec_padding_mask.unsqueeze(-1), no_future_mask)
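        # Decoder self-attention combines two constraints: padded positions
        # are masked out, and each step may attend only to itself and
        # earlier steps (the future mask), hence the elementwise combine.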
        y, self_attn_weights, cross_attn_weights = net(q, k, v, enc_mask, dec_mask)

        self.assertTupleEqual(y.numpy().shape, (4, 32, 64))
        self.assertTupleEqual(self_attn_weights.numpy().shape, (4, 8, 32, 32))
        self.assertTupleEqual(cross_attn_weights.numpy().shape, (4, 8, 32, 24))


class TestTransformerTTS(unittest.TestCase):
    def setUp(self):
        net = tts.TransformerTTS(
            128, 0, 64, 128, 80, 4, 128,
            6, 6, 128, 128, 4,
            3, 10, 0.1)
        self.net = net

    def test_encode_io(self):
        net = self.net

        text = paddle.randint(0, 128, [4, 176])
        lengths = paddle.to_tensor([176, 156, 174, 168])
        mask = masking.sequence_mask(lengths, dtype=text.dtype)
        text = text * mask

        encoded, attention_weights, encoder_mask = net.encode(text)
        print("output shapes:")
        print("encoded:", encoded.numpy().shape)
        print("encoder_attentions:", [item.shape for item in attention_weights])
        print("encoder_mask:", encoder_mask.numpy().shape)

    def test_all_io(self):
        net = self.net

        text = paddle.randint(0, 128, [4, 176])
        lengths = paddle.to_tensor([176, 156, 174, 168])
        mask = masking.sequence_mask(lengths, dtype=text.dtype)
        text = text * mask

        mel = paddle.randn([4, 189, 80])
        frames = paddle.to_tensor([189, 186, 179, 174])
        mask = masking.sequence_mask(frames, dtype=frames.dtype)
        mel = mel * mask.unsqueeze(-1)

        encoded, encoder_attention_weights, encoder_mask = net.encode(text)
        mel_output, mel_intermediate, cross_attention_weights, stop_logits = net.decode(encoded, mel, encoder_mask)

        print("output shapes:")
        print("encoder_output:", encoded.numpy().shape)
        print("encoder_attentions:", [item.shape for item in encoder_attention_weights])
        print("encoder_mask:", encoder_mask.numpy().shape)
        print("mel_output: ", mel_output.numpy().shape)
        print("mel_intermediate: ", mel_intermediate.numpy().shape)
        print("decoder_attentions:", [item.shape for item in cross_attention_weights])
        print("stop_logits:", stop_logits.numpy().shape)

    def test_predict_io(self):
        net = self.net
        net.eval()
        with paddle.no_grad():
            text = paddle.randint(0, 128, [176])
            decoder_output, encoder_attention_weights, cross_attention_weights = net.predict(text)

            print("output shapes:")
            print("mel_output: ", decoder_output.numpy().shape)
            print("encoder_attentions:", [item.shape for item in encoder_attention_weights])
            print("decoder_attentions:", [item.shape for item in cross_attention_weights])

@@ -1,130 +0,0 @@
import numpy as np
import unittest

import paddle
paddle.set_default_dtype("float64")
paddle.disable_static(paddle.CPUPlace())

from parakeet.models import waveflow


class TestFold(unittest.TestCase):
    def test_audio(self):
        x = paddle.randn([4, 32 * 8])
        y = waveflow.fold(x, 8)
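        # fold is expected to reshape the trailing time axis into frames,
        # (..., T) -> (..., T // 8, 8); these tests only assert the shapes.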
        self.assertTupleEqual(y.numpy().shape, (4, 32, 8))

    def test_spec(self):
        x = paddle.randn([4, 80, 32 * 8])
        y = waveflow.fold(x, 8)
        self.assertTupleEqual(y.numpy().shape, (4, 80, 32, 8))


class TestUpsampleNet(unittest.TestCase):
    def test_io(self):
        net = waveflow.UpsampleNet([2, 2])
        x = paddle.randn([4, 8, 6])
        y = net(x)
        self.assertTupleEqual(y.numpy().shape, (4, 8, 2 * 2 * 6))


class TestResidualBlock(unittest.TestCase):
    def test_io(self):
        net = waveflow.ResidualBlock(4, 6, (3, 3), (2, 2))
        x = paddle.randn([4, 4, 16, 32])
        condition = paddle.randn([4, 6, 16, 32])
        res, skip = net(x, condition)
        self.assertTupleEqual(res.numpy().shape, (4, 4, 16, 32))
        self.assertTupleEqual(skip.numpy().shape, (4, 4, 16, 32))

    def test_add_input(self):
        net = waveflow.ResidualBlock(4, 6, (3, 3), (2, 2))
        net.eval()
        net.start_sequence()

        x_row = paddle.randn([4, 4, 1, 32])
        condition_row = paddle.randn([4, 6, 1, 32])

        res, skip = net.add_input(x_row, condition_row)
        self.assertTupleEqual(res.numpy().shape, (4, 4, 1, 32))
        self.assertTupleEqual(skip.numpy().shape, (4, 4, 1, 32))


class TestResidualNet(unittest.TestCase):
    def test_io(self):
        net = waveflow.ResidualNet(8, 6, 8, (3, 3), [1, 1, 1, 1, 1, 1, 1, 1])
        x = paddle.randn([4, 6, 8, 32])
        condition = paddle.randn([4, 8, 8, 32])
        y = net(x, condition)
        self.assertTupleEqual(y.numpy().shape, (4, 6, 8, 32))

    def test_add_input(self):
        net = waveflow.ResidualNet(8, 6, 8, (3, 3), [1, 1, 1, 1, 1, 1, 1, 1])
        net.eval()
        net.start_sequence()

        x_row = paddle.randn([4, 6, 1, 32])
        condition_row = paddle.randn([4, 8, 1, 32])

        y_row = net.add_input(x_row, condition_row)
        self.assertTupleEqual(y_row.numpy().shape, (4, 6, 1, 32))


class TestFlow(unittest.TestCase):
    def test_io(self):
        net = waveflow.Flow(8, 16, 7, (3, 3), 8)

        x = paddle.randn([4, 1, 8, 32])
        condition = paddle.randn([4, 7, 8, 32])
        z, (logs, b) = net(x, condition)
        self.assertTupleEqual(z.numpy().shape, (4, 1, 8, 32))
        self.assertTupleEqual(logs.numpy().shape, (4, 1, 7, 32))
        self.assertTupleEqual(b.numpy().shape, (4, 1, 7, 32))

    def test_inverse_row(self):
        net = waveflow.Flow(8, 16, 7, (3, 3), 8)
        net.eval()
        net._start_sequence()

        x_row = paddle.randn([4, 1, 1, 32])  # last row
        condition_row = paddle.randn([4, 7, 1, 32])
        z_row = paddle.randn([4, 1, 1, 32])
        x_next_row, (logs, b) = net._inverse_row(z_row, x_row, condition_row)

        self.assertTupleEqual(x_next_row.numpy().shape, (4, 1, 1, 32))
        self.assertTupleEqual(logs.numpy().shape, (4, 1, 1, 32))
        self.assertTupleEqual(b.numpy().shape, (4, 1, 1, 32))

    def test_inverse(self):
        net = waveflow.Flow(8, 16, 7, (3, 3), 8)
        net.eval()

        z = paddle.randn([4, 1, 8, 32])
        condition = paddle.randn([4, 7, 8, 32])

        with paddle.no_grad():
            x, (logs, b) = net.inverse(z, condition)
        self.assertTupleEqual(x.numpy().shape, (4, 1, 8, 32))
        self.assertTupleEqual(logs.numpy().shape, (4, 1, 7, 32))
        self.assertTupleEqual(b.numpy().shape, (4, 1, 7, 32))


class TestWaveFlow(unittest.TestCase):
    def test_io(self):
        x = paddle.randn([4, 32 * 8])
        condition = paddle.randn([4, 7, 32 * 8])
        net = waveflow.WaveFlow(2, 8, 8, 16, 7, (3, 3))
        z, logs_det_jacobian = net(x, condition)

        self.assertTupleEqual(z.numpy().shape, (4, 32 * 8))
        self.assertTupleEqual(logs_det_jacobian.numpy().shape, (1,))

    def test_inverse(self):
        z = paddle.randn([4, 32 * 8])
        condition = paddle.randn([4, 7, 32 * 8])

        net = waveflow.WaveFlow(2, 8, 8, 16, 7, (3, 3))
        net.eval()

        with paddle.no_grad():
            x = net.inverse(z, condition)
        self.assertTupleEqual(x.numpy().shape, (4, 32 * 8))