
Commit 05baeb9

Update
1 parent 0bd9445 commit 05baeb9

File tree

4 files changed: +22 -16 lines changed


.coveragerc (+1)

@@ -5,6 +5,7 @@ source = neural_sp
 [report]
 exclude_lines =
     raise ValueError
+    raise TypeError
     raise NotImplementedError
     if __name__ == .__main__.:
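
Note: coverage.py treats each exclude_lines entry as a regular expression matched against source lines, so any line containing raise TypeError is now omitted from the coverage report, just like the existing ValueError and NotImplementedError patterns. A minimal sketch of the effect (hypothetical function, not from the repo):

    def check_std(std):
        """Hypothetical example: the raise line below no longer counts as uncovered."""
        if not isinstance(std, float):
            raise TypeError('std must be a float')  # matched by the new exclude_lines pattern
        return std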

neural_sp/models/modules/mocha.py (+3 -6)

@@ -72,7 +72,8 @@ def __init__(self, kdim, qdim, adim, atype, n_heads, init_r,
         if conv1d:
             self.conv1d = CausalConv1d(in_channels=kdim,
                                        out_channels=kdim,
-                                       kernel_size=conv_kernel_size)
+                                       kernel_size=conv_kernel_size,
+                                       param_init=param_init)
             # padding=(conv_kernel_size - 1) // 2

         if atype == 'add':

@@ -92,10 +93,6 @@ def reset_parameters(self, bias):
         if bias:
             nn.init.constant_(self.w_key.bias, 0.)
             nn.init.constant_(self.w_query.bias, 0.)
-        if self.conv1d is not None:
-            logger.info('===== Initialize %s with Xavier uniform distribution =====' % self.conv1d.__class__.__name__)
-            for n, p in self.conv1d.named_parameters():
-                init_with_xavier_uniform(n, p)

     def reset(self):
         self.key = None

@@ -591,7 +588,7 @@ def forward(self, key, value, query, mask=None, aw_prev=None,

 def add_gaussian_noise(xs, std):
-    """Additive gaussian nosie to encourage discreteness."""
+    """Add Gaussian noise to encourage discreteness."""
     noise = xs.new_zeros(xs.size()).normal_(std=std)
     return xs + noise
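
Two things happen here: param_init is threaded into CausalConv1d so the submodule can initialize its own weights, which makes the explicit Xavier-uniform loop in reset_parameters redundant, and the docstring of the noise helper is cleaned up. For context, a standalone usage sketch of add_gaussian_noise (assuming only PyTorch; the shapes are illustrative, not from the repo):

    import torch

    def add_gaussian_noise(xs, std):
        """Add Gaussian noise to encourage discreteness."""
        noise = xs.new_zeros(xs.size()).normal_(std=std)
        return xs + noise

    # In monotonic (MoChA-style) attention, noise is added to the pre-sigmoid
    # energies during training so the sigmoid saturates toward 0/1 and the
    # selection probabilities become nearly discrete.
    e = torch.randn(4, 1, 10, 20)                      # (batch, head, query, key) energies
    p_choose = torch.sigmoid(add_gaussian_noise(e, std=1.0))
    print(p_choose.shape)                              # torch.Size([4, 1, 10, 20])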

neural_sp/models/modules/positional_embedding.py (+3 -1)

@@ -89,7 +89,9 @@ def forward(self, xs, scale=True):
             xs = xs + self.pe[:, :xs.size(1)]
             xs = self.dropout(xs)
         elif self.pe_type == 'concat':
-            xs = torch.cat([xs, self.pe[:, :xs.size(1)]], dim=-1)
+            raise NotImplementedError
+            xs = torch.cat([xs, self.pe[:, :xs.size(1)].repeat([xs.size(0), 1, 1])], dim=-1)
+            # TODO(hirofumi0810): need dimension reduction
             xs = self.dropout(xs)
         elif '1dconv' in self.pe_type:
             xs = self.pe(xs)
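
The concat path is disabled because appending a d_model-dimensional positional table to d_model-dimensional inputs doubles the feature size while downstream layers still expect d_model; the TODO notes the missing projection. A sketch of one way the reduction could work (not the repo's code; names and sizes are assumptions):

    import torch
    import torch.nn as nn

    d_model, max_len = 256, 1000
    pe = torch.zeros(1, max_len, d_model)      # stand-in for self.pe
    proj = nn.Linear(2 * d_model, d_model)     # the missing dimension reduction

    xs = torch.randn(8, 50, d_model)           # (batch, time, d_model)
    pos = pe[:, :xs.size(1)].repeat([xs.size(0), 1, 1])
    xs = proj(torch.cat([xs, pos], dim=-1))    # back to (8, 50, d_model)
    print(xs.shape)                            # torch.Size([8, 50, 256])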

test/modules/test_zoneout.py (+15 -9)

@@ -47,12 +47,18 @@ def test_forward(rnn_type, args):
     module = importlib.import_module('neural_sp.models.modules.zoneout')
     zoneout_cell = module.ZoneoutCell(**args)

-    if rnn_type == 'lstm':
-        h, c = zoneout_cell(xs, (hxs, cxs))
-        assert h.size() == (batch_size, cell_size)
-        assert c.size() == (batch_size, cell_size)
-    elif rnn_type == 'gru':
-        h = zoneout_cell(xs, hxs)
-        assert h.size() == (batch_size, cell_size)
-    else:
-        raise ValueError(rnn_type)
+    for mode in ['train', 'eval']:
+        if mode == 'train':
+            zoneout_cell.train()
+        elif mode == 'eval':
+            zoneout_cell.eval()
+
+        if rnn_type == 'lstm':
+            h, c = zoneout_cell(xs, (hxs, cxs))
+            assert h.size() == (batch_size, cell_size)
+            assert c.size() == (batch_size, cell_size)
+        elif rnn_type == 'gru':
+            h = zoneout_cell(xs, hxs)
+            assert h.size() == (batch_size, cell_size)
+        else:
+            raise ValueError(rnn_type)
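
The test now exercises both module modes because zoneout, like dropout, branches on self.training: during training each hidden unit keeps its previous value with some probability, while at evaluation the update is a deterministic interpolation. A minimal sketch of the mechanism (standalone, not the repo's ZoneoutCell):

    import torch

    def zoneout(h_prev, h_next, p, training):
        # Keep each unit's previous value with probability p during training;
        # use the expected (deterministic) blend at evaluation time.
        if training:
            mask = torch.bernoulli(torch.full_like(h_next, p))
            return mask * h_prev + (1 - mask) * h_next
        return p * h_prev + (1 - p) * h_next

    h_prev = torch.zeros(4, 32)
    h_next = torch.randn(4, 32)
    print(zoneout(h_prev, h_next, 0.1, training=True).shape)   # torch.Size([4, 32])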
