Skip to content

Commit 4055258

Browse files
author
Maksymilian Graczyk
committed
Remove test_training_sgd_many_iters()
1 parent 741238d commit 4055258

File tree

1 file changed

+28
-38
lines changed

tests/test_lenet.py

+28-38
Original file line numberDiff line numberDiff line change
@@ -201,7 +201,7 @@ def loss_grad(layer, b):
201201
pytorch_layer.bias.grad, 1e-11)
202202

203203

204-
def run_training(net_arguments, mnist, iterations):
204+
def run_training(net_arguments, mnist):
205205
mnist_train, _ = mnist
206206

207207
net, pytorch_net, layers = net_arguments
@@ -216,54 +216,44 @@ def run_training(net_arguments, mnist, iterations):
216216
pytorch_net.fc1, pytorch_net.fc2, pytorch_net.fc3]
217217
devito_layers = [layers[0], layers[2], layers[5], layers[6], layers[7]]
218218

219-
epsilon = 1.26e-11
220-
221-
for i, data in enumerate(mnist_train, 0):
222-
images, labels = data
223-
224-
def loss_grad(layer, b):
225-
gradients = []
219+
images, labels = iter(mnist_train).next()
226220

227-
for j in range(10):
228-
result = layer.result.data[j, b]
229-
if j == labels[b]:
230-
result -= 1
231-
gradients.append(result)
221+
def loss_grad(layer, b):
222+
gradients = []
232223

233-
return gradients
224+
for j in range(10):
225+
result = layer.result.data[j, b]
226+
if j == labels[b]:
227+
result -= 1
228+
gradients.append(result)
234229

235-
images = images.double()
230+
return gradients
236231

237-
outputs = net.forward(images.numpy())
232+
images = images.double()
238233

239-
pytorch_optimizer.zero_grad()
240-
pytorch_outputs = pytorch_net(images)
234+
outputs = net.forward(images.numpy())
241235

242-
compare(outputs, nn.Softmax(dim=1)(pytorch_outputs),
243-
1e-12 + i * epsilon)
236+
pytorch_optimizer.zero_grad()
237+
pytorch_outputs = pytorch_net(images)
244238

245-
net.backward(loss_grad, optimizer)
239+
compare(outputs, nn.Softmax(dim=1)(pytorch_outputs),
240+
1e-12)
246241

247-
pytorch_loss = criterion(pytorch_outputs, labels)
248-
pytorch_loss.backward()
249-
pytorch_optimizer.step()
242+
net.backward(loss_grad, optimizer)
250243

251-
for j in range(len(pytorch_layers) - 1, -1, -1):
252-
pytorch_layer = pytorch_layers[j]
253-
devito_layer = devito_layers[j]
244+
pytorch_loss = criterion(pytorch_outputs, labels)
245+
pytorch_loss.backward()
246+
pytorch_optimizer.step()
254247

255-
compare(devito_layer.kernel.data, pytorch_layer.weight,
256-
1e-12 + i * epsilon)
257-
compare(devito_layer.bias.data, pytorch_layer.bias,
258-
1e-12 + i * epsilon)
248+
for j in range(len(pytorch_layers) - 1, -1, -1):
249+
pytorch_layer = pytorch_layers[j]
250+
devito_layer = devito_layers[j]
259251

260-
if i == iterations - 1:
261-
break
252+
compare(devito_layer.kernel.data, pytorch_layer.weight,
253+
1e-12)
254+
compare(devito_layer.bias.data, pytorch_layer.bias,
255+
1e-12)
262256

263257

264258
def test_training_sgd(net_arguments, mnist):
265-
run_training(net_arguments, mnist, 1)
266-
267-
268-
def test_training_sgd_many_iters(net_arguments, mnist):
269-
run_training(net_arguments, mnist, 20)
259+
run_training(net_arguments, mnist)

0 commit comments

Comments
 (0)