test4.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 5 14:50:20 2017
@author: lyx
"""
import tensorflow as tf
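# This script uses the TensorFlow 1.x API (tf.placeholder, tf.Session, etc.).
# It trains a 2-input linear model with a custom asymmetric loss built from
# tf.where/tf.greater, so under- and over-predictions are penalized differently.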
'''
# Element-wise ops demo (disabled): clip, log, mean, comparison, and selection.
v = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
with tf.Session() as sess:
    print(tf.clip_by_value(v, 2.5, 4.5).eval())    # clamp values into [2.5, 4.5]
    print(tf.log(v).eval())                        # element-wise natural log
    print(tf.reduce_mean(v).eval())                # mean over all elements
# sess = tf.InteractiveSession()
# print(tf.reduce_mean(v).eval())
# sess.close()
v1 = tf.constant([1.0, 2.0, 3.0, 4.0])
v2 = tf.constant([4.0, 3.0, 2.0, 1.0])
sess = tf.InteractiveSession()
print(tf.greater(v1, v2).eval())                    # element-wise comparison
print(tf.where(tf.greater(v1, v2), v1, v2).eval())  # element-wise max via select
sess.close()
'''
from numpy.random import RandomState

batch_size = 8
# Two input features, one regression target.
x = tf.placeholder(tf.float32, shape=(None, 2), name='x-input')
y_ = tf.placeholder(tf.float32, shape=(None, 1), name='y-input')
w1 = tf.Variable(tf.random_normal([2, 1], stddev=1, seed=1))
y = tf.matmul(x, w1)

# Asymmetric loss: under-prediction (y < y_) costs 10 per unit,
# over-prediction (y > y_) costs only 1 per unit.
loss_less = 10
loss_more = 1
loss = tf.reduce_sum(tf.where(tf.greater(y, y_),
                              (y - y_) * loss_more,
                              (y_ - y) * loss_less))
train_step = tf.train.AdamOptimizer(0.001).minimize(loss)
# Synthetic data: labels are x1 + x2 plus noise drawn from [-0.05, 0.05).
rdm = RandomState(1)
dataset_size = 128
X = rdm.rand(dataset_size, 2)
Y = [[x1 + x2 + rdm.rand() / 10.0 - 0.05] for (x1, x2) in X]
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()  # initialize_all_variables() is deprecated
    sess.run(init_op)
    STEPS = 5000
    for i in range(STEPS):
        # Cycle through the data in mini-batches of batch_size.
        start = (i * batch_size) % dataset_size
        end = min(start + batch_size, dataset_size)
        sess.run(train_step,
                 feed_dict={x: X[start:end], y_: Y[start:end]})
    print(sess.run(w1))