
Commit 888eb17

Clean some old code.
1 parent c49ccfe commit 888eb17

3 files changed: +50 -57 lines changed

mesgclsf/s2train.py (+7 -7)

@@ -9,7 +9,7 @@
 FEATURE_WIDTH = 32
 
 
-def train_tas(model, model_scope, max_steps, result_file):
+def train_tas(model, model_scope, num_epoches, result_file):
     height, width = FEATURE_HEIGHT, FEATURE_WIDTH
 
     feats0, feats1 = read_features_tas(height, width)
@@ -26,12 +26,12 @@ def train_tas(model, model_scope, max_steps, result_file):
 
     res_dir = os.path.join(PROJECT_ROOT, 'Data', 'Result')
     img_cnn = ImgConvNets(model, model_scope, height, width, class_count=2, keep_prob=0.5,
-                          batch_size=32, learning_rate=1e-4, lr_adaptive=True, max_steps=max_steps)
+                          batch_size=32, learning_rate=1e-4, lr_adaptive=True, num_epoches=num_epoches)
 
     img_cnn.train(all_feats, all_y, res_dir, result_file=result_file)
 
 
-def train_lss(model, model_scope, max_steps, result_file):
+def train_lss(model, model_scope, num_epoches, result_file):
     height, width = FEATURE_HEIGHT, FEATURE_WIDTH
 
     feats0, feats1, feats2, feats3 = read_features_lss(height, width)
@@ -52,7 +52,7 @@ def train_lss(model, model_scope, max_steps, result_file):
 
     res_dir = os.path.join(PROJECT_ROOT, 'Data', 'Result')
     img_cnn = ImgConvNets(model, model_scope, height, width, class_count=4, keep_prob=0.5,
-                          batch_size=32, learning_rate=1e-4, lr_adaptive=True, max_steps=max_steps)
+                          batch_size=32, learning_rate=1e-4, lr_adaptive=True, num_epoches=num_epoches)
 
     img_cnn.train(all_feats, all_y, res_dir, result_file=result_file)
 
@@ -99,14 +99,14 @@ def read_features_lss(height, width, folder='Training'):
     from misc.cnnpredictor import CnnPredictor
 
     training = True
-    sign_type = 'TAS'
+    sign_type = 'LSS'
 
     if training:
         t0 = time()
         if sign_type == 'TAS':
-            train_tas(model='BASIC', model_scope='s2_tas', max_steps=16000, result_file='s2_tas_model')
+            train_tas(model='BASIC', model_scope='s2_tas', num_epoches=50, result_file='s2_tas_model')
         else:
-            train_lss(model='BASIC', model_scope='s2_lss', max_steps=16000, result_file='s2_lss_model')
+            train_lss(model='BASIC', model_scope='s2_lss', num_epoches=50, result_file='s2_lss_model')
 
         t1 = time()
         print("Training time: {:6.2f} seconds".format(t1 - t0))

misc/imgconvnets.py (+40 -47)

@@ -13,7 +13,7 @@ class ImgConvNets(object):
     are multiplications of 4.
     """
     def __init__(self, model, model_scope, img_height, img_width, class_count, keep_prob=0.5,
-                 learning_rate=1e-4, lr_adaptive=True, batch_size=32, max_steps=20000):
+                 learning_rate=1e-4, lr_adaptive=True, batch_size=32, num_epoches=100):
         """
         Args:
             model: Specify which model to use.
@@ -28,7 +28,7 @@ def __init__(self, model, model_scope, img_height, img_width, class_count, keep_
                 accuracy. If True, the given learning_rate will be ignored.
             batch_size: optional. The number of samples to be used in one step of the
                 optimization process.
-            max_steps: optional. The max number of iterative steps in the training process.
+            num_epoches: optional. The number of epoches for the training process.
         """
         assert model == 'BASIC' or model == 'DCNN' or model == 'STCNN'
 
@@ -41,7 +41,7 @@ def __init__(self, model, model_scope, img_height, img_width, class_count, keep_
         self.learning_rate = learning_rate
         self.lr_adaptive = lr_adaptive
         self.batch_size = batch_size
-        self.max_steps = max_steps
+        self.num_epoches = num_epoches
 
     def train(self, img_features, true_labels, train_dir, result_file):
         """
@@ -103,43 +103,44 @@ def train(self, img_features, true_labels, train_dir, result_file):
 
             save_file = os.path.join(train_dir, result_file)
 
-            disp_step = self._get_epoch_step_count(train_set.shape[0])
-            for step in range(self.max_steps):
-                # Read a batch of images and labels
-                batch_data = self._get_next_batch(train_set, step*self.batch_size)
-                images_feed, labels_feed = \
-                    batch_data[:, :cols], batch_data[:, cols:].reshape(-1)
-
+            epoch_steps = math.ceil(train_set.shape[0] / self.batch_size)
+            for epoch in range(1, self.num_epoches+1):
                 lr_feed = self._get_learning_rate(last_accu)
-                # Run one step of the model. The return values are the activations
-                # from the `train_op` (which is discarded) and the `loss` Op.
-                _, loss_val, accu_val = sess.run([train_op, loss, accuracy],
-                                                 feed_dict={images_placeholder: images_feed,
-                                                            labels_placeholder: labels_feed,
-                                                            learning_rate_placeholder: lr_feed,
-                                                            keep_prob_placeholder: self.keep_prob})
-
-                # Check to make sure the loss is decreasing
-                loss_list.append(loss_val)
-                accu_list.append(accu_val)
-                if (step % disp_step == 0) or (step == self.max_steps-1):
-                    mean_accu = sum(accu_list)*100/len(accu_list)
-                    if mean_accu >= 99.68 and mean_accu > last_accu:
-                        saver.save(sess, save_file, global_step=step)
-                    elif step == self.max_steps - 1:
-                        saver.save(sess, save_file)
-
-                    print("Step {:6d}: learning_rate used = {:.6f}, average loss = {:8.4f}, "
-                          "and training accuracy min = {:6.2f}%, mean = {:6.2f}%, "
-                          "max = {:6.2f}%".format(step, lr_feed,
-                                                  sum(loss_list)/len(loss_list),
-                                                  min(accu_list)*100, mean_accu,
-                                                  max(accu_list)*100))
-                    if mean_accu >= 99.99: break
-
-                    loss_list = []
-                    accu_list = []
-                    last_accu = mean_accu
+                for step in range(epoch_steps):
+                    # Read a batch of images and labels
+                    batch_data = self._get_next_batch(train_set, step*self.batch_size)
+                    images_feed, labels_feed = \
+                        batch_data[:, :cols], batch_data[:, cols:].reshape(-1)
+
+                    # Run one step of the model. The return values are the activations
+                    # from the `train_op` (which is discarded) and the `loss` Op.
+                    _, loss_val, accu_val = sess.run([train_op, loss, accuracy],
+                                                     feed_dict={images_placeholder: images_feed,
+                                                                labels_placeholder: labels_feed,
+                                                                learning_rate_placeholder: lr_feed,
+                                                                keep_prob_placeholder: self.keep_prob})
+
+                    # Check to make sure the loss is decreasing
+                    loss_list.append(loss_val)
+                    accu_list.append(accu_val)
+
+                mean_accu = sum(accu_list)*100/len(accu_list)
+                if mean_accu >= 99.68 and mean_accu > last_accu:
+                    saver.save(sess, save_file, global_step=epoch)
+                elif epoch == self.num_epoches - 1:
+                    saver.save(sess, save_file)
+
+                print("Epoch {:3d} completed: learning_rate used = {:.6f}, average loss = {:8.4f}, "
+                      "and training accuracy min = {:6.2f}%, mean = {:6.2f}%, "
+                      "max = {:6.2f}%".format(epoch, lr_feed,
+                                              sum(loss_list)/len(loss_list),
+                                              min(accu_list)*100, mean_accu,
+                                              max(accu_list)*100))
+                if mean_accu >= 99.99: break
+
+                loss_list = []
+                accu_list = []
+                last_accu = mean_accu
 
     def _build_inference_graph_stcnn(self, images, keep_prob):
         """
@@ -393,14 +394,6 @@ def _build_training_graph(self, logits, labels, learning_rate):
 
         return train_op, loss, accuracy
 
-    def _get_epoch_step_count(self, train_set_size):
-        if self.max_steps > 10000:
-            epoch_step = math.ceil(train_set_size / (self.batch_size * 1000.0)) * 1000
-        else:
-            epoch_step = math.ceil(train_set_size / (self.batch_size * 100.0)) * 100
-
-        return epoch_step
-
     def _get_next_batch(self, data_set, start_index):
         cnt = data_set.shape[0]
 
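
The train() loop changes from a single flat step loop, where the removed _get_epoch_step_count() helper decided how often to log and checkpoint, to an outer epoch loop with an inner loop over epoch_steps = ceil(N / batch_size) batches; logging, checkpointing and the early-stop check now happen once per epoch. A framework-free sketch of that control flow, with a hypothetical run_step() standing in for the real sess.run([train_op, loss, accuracy], ...) call and random values in place of real losses and accuracies:

import math
import random

def run_step(step):
    # Placeholder for sess.run([train_op, loss, accuracy], feed_dict=...);
    # returns a fake (loss, accuracy) pair just to exercise the loop.
    return random.random(), random.random()

def train_sketch(train_set_size, batch_size=32, num_epoches=100):
    epoch_steps = math.ceil(train_set_size / batch_size)
    last_accu = 0.0
    for epoch in range(1, num_epoches + 1):
        loss_list, accu_list = [], []
        for step in range(epoch_steps):
            loss_val, accu_val = run_step(step)   # one batch starting at step * batch_size
            loss_list.append(loss_val)
            accu_list.append(accu_val)

        # Per-epoch bookkeeping, mirroring the new code's structure
        mean_accu = sum(accu_list) * 100 / len(accu_list)
        print("Epoch {:3d}: mean loss {:8.4f}, mean accuracy {:6.2f}%".format(
            epoch, sum(loss_list) / len(loss_list), mean_accu))
        if mean_accu >= 99.99:
            break                 # same early-stop threshold as the new code
        last_accu = mean_accu     # feeds the adaptive learning-rate choice in the real code

train_sketch(train_set_size=10000, num_epoches=3)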

textdect/convertmodel.py (+3 -3)

@@ -112,7 +112,7 @@ def convert(model_dir, keras_model_file, tf_model_file, name_output='s1_output',
 if __name__ == '__main__':
     from settings import PROJECT_ROOT
 
-    action = 'convert' # Modify this line to run convert or predict
+    action = 'predict' # Modify this line to run convert or predict
     if action == 'convert':
         model_dir = os.path.join(PROJECT_ROOT, 'Data', 'Result')
         keras_model = 's1_model_weights.h5' # model architecture and weights
@@ -131,9 +131,9 @@ def convert(model_dir, keras_model_file, tf_model_file, name_output='s1_output',
             full_path_name = os.path.join(img_dir, img_file)
             if os.path.isfile(full_path_name) and img_file.lower().endswith(tuple(['.jpg', '.png'])):
                 file_count += 1
-                if file_count > 200:
+                if file_count > 2600:
                     file_list.append(full_path_name)
-                    if file_count >= 1000:
+                    if file_count >= 3000:
                         break
 
         s1_predict('config.json', model_dir, model_file, file_list, out_dir)
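
In the predict branch, the two thresholds select a window of the directory listing: the old values (> 200, >= 1000) collected image files 201 through 1000, while the new values (> 2600, >= 3000) collect files 2601 through 3000. A small sketch of the equivalent slice, assuming a hypothetical ordered list of image paths (the real code iterates over os.listdir() order):

image_files = ["img_{:04d}.jpg".format(i) for i in range(1, 5001)]   # hypothetical listing

old_window = image_files[200:1000]    # old thresholds: > 200 and >= 1000  -> 800 files
new_window = image_files[2600:3000]   # new thresholds: > 2600 and >= 3000 -> 400 files
print(len(old_window), len(new_window))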
