
Commit 892425d

Merge pull request #1234 from apache/dev-postgresql
Merge the dev branch to master branch
2 parents ae2e50f + 9fac6c1

24 files changed (+3307 −13 lines changed)

examples/cnn_ms/msmlp/model.py (+27 −5)
@@ -32,6 +32,7 @@
 
 singa_dtype = {"float16": tensor.float16, "float32": tensor.float32}
 
+
 #### self-defined loss begin
 
 ### from autograd.py
@@ -62,11 +63,13 @@ def backward(self, dy=1.0):
         dx *= dy
         return dx
 
+
 def se_loss(x):
     # assert x.shape == t.shape, "input and target shape different: %s, %s" % (
     #     x.shape, t.shape)
     return SumError()(x)[0]
 
+
 ### from layer.py
 class SumErrorLayer(Layer):
     """
@@ -79,6 +82,7 @@ def __init__(self):
     def forward(self, x):
         return se_loss(x)
 
+
 #### self-defined loss end
 
 class MSMLP(model.Model):
@@ -92,7 +96,6 @@ def __init__(self, data_size=10, perceptron_size=100, num_classes=10):
         self.linear1 = layer.Linear(perceptron_size)
         self.linear2 = layer.Linear(num_classes)
         self.softmax_cross_entropy = layer.SoftMaxCrossEntropy()
-
         self.sum_error = SumErrorLayer()
 
     def forward(self, inputs):
@@ -101,12 +104,24 @@ def forward(self, inputs):
         y = self.linear2(y)
         return y
 
-    def train_one_batch(self, x, y, synflow_flag, dist_option, spars):
+    def train_one_batch(self, x, y, dist_option, spars, synflow_flag):
+        # print ("in train_one_batch")
         out = self.forward(x)
-        loss = self.softmax_cross_entropy(out, y)
+        # print ("train_one_batch x.data: \n", x.data)
+        # print ("train_one_batch y.data: \n", y.data)
+        # print ("train_one_batch out.data: \n", out.data)
+        if synflow_flag:
+            # print ("sum_error")
+            loss = self.sum_error(out)
+        else:  # normal training
+            # print ("softmax_cross_entropy")
+            loss = self.softmax_cross_entropy(out, y)
+        # print ("train_one_batch loss.data: \n", loss.data)
 
         if dist_option == 'plain':
+            # print ("before pn_p_g_list = self.optimizer(loss)")
             pn_p_g_list = self.optimizer(loss)
+            # print ("after pn_p_g_list = self.optimizer(loss)")
         elif dist_option == 'half':
             self.optimizer.backward_and_update_half(loss)
         elif dist_option == 'partialUpdate':
@@ -119,17 +134,24 @@ def train_one_batch(self, x, y, synflow_flag, dist_option, spars):
             self.optimizer.backward_and_sparse_update(loss,
                                                       topK=False,
                                                       spars=spars)
+        # print ("len(pn_p_g_list): \n", len(pn_p_g_list))
+        # print ("len(pn_p_g_list[0]): \n", len(pn_p_g_list[0]))
+        # print ("pn_p_g_list[0][0]: \n", pn_p_g_list[0][0])
+        # print ("pn_p_g_list[0][1].data: \n", pn_p_g_list[0][1].data)
+        # print ("pn_p_g_list[0][2].data: \n", pn_p_g_list[0][2].data)
         return pn_p_g_list, out, loss
+        # return pn_p_g_list[0], pn_p_g_list[1], pn_p_g_list[2], out, loss
 
     def set_optimizer(self, optimizer):
         self.optimizer = optimizer
 
 
 def create_model(pretrained=False, **kwargs):
     """Constructs a CNN model.
+
     Args:
         pretrained (bool): If True, returns a pre-trained model.
-
+
     Returns:
         The created CNN model.
     """
@@ -196,4 +218,4 @@ def create_model(pretrained=False, **kwargs):
         out, loss = model(tx, ty, 'fp32', spars=None)
 
         if i % 100 == 0:
-            print("training loss = ", tensor.to_numpy(loss)[0])
+            print("training loss = ", tensor.to_numpy(loss)[0])
