
Use print() function in both Python 2 and Python 3 #45

Closed
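
This PR converts Python 2 `print` statements into `print()` function calls and adds `from __future__ import print_function` at the top of each touched file, so the same source runs unchanged under both Python 2 and Python 3. A minimal sketch of the pattern (variable and value are hypothetical):

```python
from __future__ import print_function  # makes print a function in Python 2 as well

cost = 0.123
# Python 2 only:
#   print "cost:", cost
# works under both Python 2 and Python 3:
print("cost:", cost)
```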
9 changes: 5 additions & 4 deletions airline/ann.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
# The corresponding tutorial for this code was released EXCLUSIVELY as a bonus
# If you want to learn about future bonuses, please sign up for my newsletter at:
# https://lazyprogrammer.me
@@ -109,7 +110,7 @@ def fit(self, X, Y, activation=T.tanh, learning_rate=1e-3, mu=0.5, reg=0, epochs
c, p = train_op(Xbatch, Ybatch)
costs.append(c)
if (j+1) % print_period == 0:
-print "i:", i, "j:", j, "nb:", n_batches, "cost:", c
+print("i:", i, "j:", j, "nb:", n_batches, "cost:", c)

if show_fig:
plt.plot(costs)
@@ -156,16 +157,16 @@ def predict(self, X):
X[:,d] = series[d:d+n]
Y = series[D:D+n]

-print "series length:", n
+print("series length:", n)
Xtrain = X[:n/2]
Ytrain = Y[:n/2]
Xtest = X[n/2:]
Ytest = Y[n/2:]

model = ANN([200])
model.fit(Xtrain, Ytrain, activation=T.tanh)
-print "train score:", model.score(Xtrain, Ytrain)
-print "test score:", model.score(Xtest, Ytest)
+print("train score:", model.score(Xtrain, Ytrain))
+print("test score:", model.score(Xtest, Ytest))

# plot the prediction with true values
plt.plot(series)
7 changes: 4 additions & 3 deletions airline/lr.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
# The corresponding tutorial for this code was released EXCLUSIVELY as a bonus
# If you want to learn about future bonuses, please sign up for my newsletter at:
# https://lazyprogrammer.me
@@ -31,16 +32,16 @@
X[:,d] = series[d:d+n]
Y = series[D:D+n]

-print "series length:", n
+print("series length:", n)
Xtrain = X[:n/2]
Ytrain = Y[:n/2]
Xtest = X[n/2:]
Ytest = Y[n/2:]

model = LinearRegression()
model.fit(Xtrain, Ytrain)
-print "train score:", model.score(Xtrain, Ytrain)
-print "test score:", model.score(Xtest, Ytest)
+print("train score:", model.score(Xtrain, Ytrain))
+print("test score:", model.score(Xtest, Ytest))

# plot the prediction with true values
plt.plot(series)
9 changes: 5 additions & 4 deletions airline/rnn.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
# The corresponding tutorial for this code was released EXCLUSIVELY as a bonus
# If you want to learn about future bonuses, please sign up for my newsletter at:
# https://lazyprogrammer.me
@@ -87,7 +88,7 @@ def fit(self, X, Y, activation=T.tanh, learning_rate=1e-1, mu=0.5, reg=0, epochs
c = self.train_op(learning_rate, X[j], Y[j])
cost += c
if i % 10 == 0:
-print "i:", i, "cost:", cost, "time for epoch:", (datetime.now() - t0)
+print("i:", i, "cost:", cost, "time for epoch:", (datetime.now() - t0))
if (i+1) % 500 == 0:
learning_rate /= 10
costs.append(cost)
@@ -141,7 +142,7 @@ def predict(self, X):
X[:,d] = series[d:d+n]
Y = series[D:D+n]

-print "series length:", n
+print("series length:", n)
Xtrain = X[:n/2]
Ytrain = Y[:n/2]
Xtest = X[n/2:]
@@ -154,8 +155,8 @@ def predict(self, X):

model = RNN([50])
model.fit(Xtrain, Ytrain, activation=T.tanh)
-print "train score:", model.score(Xtrain, Ytrain)
-print "test score:", model.score(Xtest, Ytest)
+print("train score:", model.score(Xtrain, Ytrain))
+print("test score:", model.score(Xtest, Ytest))

# plot the prediction with true values
plt.plot(series)
13 changes: 7 additions & 6 deletions bayesian_ml/1/nb.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
# Naive Bayes with prior on mean and precision of Gaussian
# mean | precision ~ N(0, c / precision)
# precision ~ Gamma(a, b)
@@ -120,15 +121,15 @@ def plot_image(x, Q, title):
Ytest = pd.read_csv('ytest.csv', header=None).as_matrix().flatten()
model = NB()
model.fit(Xtrain, Ytrain)
-print "train accuracy:", model.score(Xtrain, Ytrain)
-print "test accuracy:", model.score(Xtest, Ytest)
+print("train accuracy:", model.score(Xtrain, Ytrain))
+print("test accuracy:", model.score(Xtest, Ytest))

# confusion matrix
M = model.confusion_matrix(Xtest, Ytest)
-print "confusion matrix:"
-print M
-print "N:", len(Ytest)
-print "sum(M):", M.sum()
+print("confusion matrix:")
+print(M)
+print("N:", len(Ytest))
+print("sum(M):", M.sum())

# plot 3 misclassified
Q = pd.read_csv('Q.csv', header=None).as_matrix()
9 changes: 5 additions & 4 deletions bayesian_ml/2/em.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
# expectation-maximization for the model:
# x(n) ~ N(Wz(n), sigma**2 I) (observed variables)
# z(n) ~ N(0, I) (latent variables)
@@ -61,9 +62,9 @@ def loglikelihood(X, Z, W):
plt.plot(costs)
plt.show()

-print "actual W:", W0
-print "predicted W:", W
+print("actual W:", W0)
+print("predicted W:", W)

-print "log-likelihood given real W:", loglikelihood(X, Z, W0)
+print("log-likelihood given real W:", loglikelihood(X, Z, W0))

-print "log-likelihood found:", costs[-1]
+print("log-likelihood found:", costs[-1])
13 changes: 7 additions & 6 deletions bayesian_ml/2/probit.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
# probit regression

import numpy as np
@@ -103,15 +104,15 @@ def plot_image(x, Q, title):
Ytest = pd.read_csv('ytest.csv', header=None).as_matrix().flatten()
model = ProbitRegression()
model.fit(Xtrain, Ytrain)
-print "train accuracy:", model.score(Xtrain, Ytrain)
-print "test accuracy:", model.score(Xtest, Ytest)
+print("train accuracy:", model.score(Xtrain, Ytrain))
+print("test accuracy:", model.score(Xtest, Ytest))

# confusion matrix
M = model.confusion_matrix(Xtest, Ytest)
-print "confusion matrix:"
-print M
-print "N:", len(Ytest)
-print "sum(M):", M.sum()
+print("confusion matrix:")
+print(M)
+print("N:", len(Ytest))
+print("sum(M):", M.sum())

# plot 3 misclassified
Q = pd.read_csv('Q.csv', header=None).as_matrix()
9 changes: 5 additions & 4 deletions bayesian_ml/3/run.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
# variational-inference for linear regression
# y(i) ~ N( x(i).dot(w), 1/lambda )
# w ~ N( 0, diag(alpha_1, alpha_2, ..., alpha_D)^-1 )
@@ -81,7 +82,7 @@ def run(num=1, T=500):
Y = pd.read_csv('y_set%s.csv' % num, header=None).as_matrix().flatten()
Z = pd.read_csv('z_set%s.csv' % num, header=None).as_matrix().flatten()
N, D = X.shape
-print X.shape, Y.shape, Z.shape
+print(X.shape, Y.shape, Z.shape)

a0 = 1e-16
b0 = 1e-16
@@ -129,16 +130,16 @@ def run(num=1, T=500):
# update L
L[t] = objective(X, Y, C, mu, a, b, e, f, a0, b0, e0, f0)
if t % 20 == 0:
-print "t:", t
+print("t:", t)
if num == 3:
-print "L:", L[t]
+print("L:", L[t])

# plot 1/E[alpha]
plt.plot(b/a)
plt.show()

# 1/E[lambda]
-print "1/E[lambda]:", f/e
+print("1/E[lambda]:", f/e)

# plot L
plt.plot(L)
7 changes: 4 additions & 3 deletions bayesian_ml/4/emgmm.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
# GMM using Expectation-Maximization

import numpy as np
@@ -54,9 +55,9 @@ def gmm(X, K, max_iter=20, smoothing=1e-2):
plt.scatter(X[:,0], X[:,1], c=R.argmax(axis=1))
plt.show()

-print "pi:", pi
-print "means:", M
-print "covariances:", C
+print("pi:", pi)
+print("means:", M)
+print("covariances:", C)
return R


3 changes: 2 additions & 1 deletion bayesian_ml/4/npbgmm.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
# GMM using Bayesian Nonparametric Clustering
# Gaussian Mixture Model
# Dirichlet Process
@@ -96,7 +97,7 @@ def gmm(X, T=500):
observations_per_cluster = np.zeros((T, 6))
for t in xrange(T):
if t % 20 == 0:
-print t
+print(t)
# 1) calculate phi[i,j]
# Notes:
# MANY new clusters can be made each iteration
3 changes: 2 additions & 1 deletion bayesian_ml/4/vigmm.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
# GMM using Variational Inference

import numpy as np
@@ -199,7 +200,7 @@ def gmm(X, K, max_iter=100):
plt.title("Costs")
plt.show()

-print "cluster assignments:\n", cluster_assignments
+print("cluster assignments:\n", cluster_assignments)
plt.scatter(X[:,0], X[:,1], c=cluster_assignments, s=100, alpha=0.7)
plt.show()

11 changes: 6 additions & 5 deletions best_fit_line.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
from pulp import *

### remove variable b because it is unconstrained
@@ -39,12 +40,12 @@
prob += (a*x - y - c >= -z)

status = prob.solve(GLPK(msg = 0))
-print "status:", LpStatus[status]
-print "values:"
-print "\ta:", value(a)
+print("status:", LpStatus[status])
+print("values:")
+print("\ta:", value(a))
# print "\tb:", value(b)
-print "\tc:", value(c)
-print "\tz:", value(z)
+print("\tc:", value(c))
+print("\tz:", value(z))


# extra part to plot everything
7 changes: 4 additions & 3 deletions cnn_class/cifar.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
# https://deeplearningcourses.com/c/deep-learning-convolutional-neural-networks-theano-tensorflow
import os
import numpy as np
@@ -43,7 +44,7 @@ def getImageData():
im = Image.open("../large_files/cifar10/train/%s.png" % (i + 1))
X[i] = image2array(im)
if i % 1000 == 0:
-print i
+print(i)
np.save(savedXpath, X.astype(np.uint8))
else:
X = np.load(savedXpath)
@@ -62,7 +63,7 @@ def getImageData():
idx += 1
Y[i] = label2idx[s]
i += 1
-print "done loading data"
+print("done loading data")
X, Y = shuffle(X, Y)
return X[:30000], Y[:30000]

@@ -202,7 +203,7 @@ def fit(self, X, Y, lr=1e-4, mu=0.99, reg=1e-6, decay=0.99999, eps=1e-2, batch_s
c, p = cost_predict_op(Xvalid, Yvalid)
costs.append(c)
e = error_rate(Yvalid, p)
-print "i:", i, "j:", j, "nb:", n_batches, "cost:", c, "error rate:", e
+print("i:", i, "j:", j, "nb:", n_batches, "cost:", c, "error rate:", e)

if show_fig:
plt.plot(costs)
2 changes: 1 addition & 1 deletion cnn_class2/tf_resnet_convblock_starter.py
@@ -31,5 +31,5 @@ def predict(self, X):
conv_block.session = session
session.run(init)

-output = conv_block.predict(X):
+output = conv_block.predict(X)
print("output.shape:", output.shape)
4 changes: 2 additions & 2 deletions hmm_class/generate_ht.py
@@ -35,9 +35,9 @@ def main():
for n in range(50):
sequence = generate_sequence(30)
sequence = ''.join(symbol_map[s] for s in sequence)
-print sequence
+print(sequence)
f.write("%s\n" % sequence)


if __name__ == '__main__':
-main()
+main()
7 changes: 4 additions & 3 deletions hmm_class/sites.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
# https://deeplearningcourses.com/c/unsupervised-machine-learning-hidden-markov-models-in-python
# https://udemy.com/unsupervised-machine-learning-hidden-markov-models-in-python
# http://lazyprogrammer.me
@@ -19,14 +20,14 @@
transitions[k] = v / row_sums[s]

# initial state distribution
-print "initial state distribution:"
+print("initial state distribution:")
for k, v in transitions.iteritems():
s, e = k
if s == '-1':
-print e, v
+print(e, v)

# which page has the highest bounce?
for k, v in transitions.iteritems():
s, e = k
if e == 'B':
-print "bounce rate for %s: %s" % (s, v)
+print("bounce rate for %s: %s" % (s, v))
3 changes: 2 additions & 1 deletion numpy_class/dot_for.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
# https://deeplearningcourses.com/c/deep-learning-prerequisites-the-numpy-stack-in-python
# https://www.udemy.com/deep-learning-prerequisites-the-numpy-stack-in-python
import numpy as np
@@ -23,4 +24,4 @@ def slow_dot_product(a, b):
a.dot(b)
dt2 = datetime.now() - t0

-print "dt1 / dt2:", dt1.total_seconds() / dt2.total_seconds()
+print("dt1 / dt2:", dt1.total_seconds() / dt2.total_seconds())
3 changes: 2 additions & 1 deletion numpy_class/manual_data_loading.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
# https://deeplearningcourses.com/c/deep-learning-prerequisites-the-numpy-stack-in-python
# https://www.udemy.com/deep-learning-prerequisites-the-numpy-stack-in-python

@@ -16,4 +17,4 @@
X.append(sample)

X = np.array(X)
-print X
+print(X)
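
Note: `from __future__ import print_function` must come before all other statements in a module (only a docstring, comments, and blank lines may precede it), which is why each file gains the import at the very top, ahead of the existing comment header. A minimal sketch of valid placement (module contents hypothetical):

```python
"""Example module."""
# the docstring and comments may precede a __future__ import; other statements may not
from __future__ import print_function

print("this call behaves identically under Python 2 and 3")
```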