Fix bugs causing incorrect results with OpenMP #6

Merged (17 commits), Aug 25, 2020
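Taken together, the diffs below appear to fix the OpenMP result bugs by making argument passing explicit: each layer's equations() now returns both its Devito equations and a list of (name, value) runtime arguments, and Net merges those pairs into forward and backward argument dictionaries that are splatted into Operator.apply(). The Dockerfile is also moved to an Ubuntu base with explicit pip3 usage.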
9 changes: 5 additions & 4 deletions Dockerfile_CPU
@@ -1,8 +1,9 @@
-FROM python:3
+FROM ubuntu:latest
+RUN apt-get update && apt-get -y upgrade && apt-get -y install python3-pip build-essential
 WORKDIR /usr/src/app
-RUN pip install torch==1.6.0+cpu torchvision==0.7.0+cpu -f https://download.pytorch.org/whl/torch_stable.html
+RUN pip3 install torch==1.6.0+cpu torchvision==0.7.0+cpu -f https://download.pytorch.org/whl/torch_stable.html
 COPY requirements.txt joey/
-RUN pip install -r joey/requirements.txt
+RUN pip3 install -r joey/requirements.txt
 COPY . joey/
-RUN pip install -e joey
+RUN pip3 install -e joey
 WORKDIR /usr/src/app/joey
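A brief note on the Dockerfile change: ubuntu:latest, unlike python:3, ships neither Python nor pip, so the image installs python3-pip and build-essential via apt-get and every pip call becomes an explicit pip3, removing ambiguity about which interpreter the packages target.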
10 changes: 6 additions & 4 deletions joey/base.py
@@ -47,7 +47,9 @@ def __init__(self, kernel_size,
                          dim_allocator_func)

         if generate_code:
-            self._op = Operator(self.equations())
+            eqs, args = self.equations()
+            self._arg_dict = dict(args)
+            self._op = Operator(eqs)
             self._op.cfunction

     @property
@@ -120,14 +122,14 @@ def _allocate(self, kernel_size, input_size, name_allocator_func,

     @abstractmethod
     def execute(self, kernel_data=None, input_data=None, bias=None) -> array:
-        self._op.apply()
+        self._op.apply(**self._arg_dict)
         return self._R.data

     @abstractmethod
-    def equations(self, input_function=None) -> list:
+    def equations(self) -> (list, list):
         pass

     @abstractmethod
     def backprop_equations(self, prev_layer, next_layer,
-                           batch_constant, backward_arg_dict) -> list:
+                           batch_constant) -> (list, list):
         pass
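For context, the snippet below is a minimal runnable sketch of the new contract, not code from the PR: an equations()-style function (here the invented toy_equations, with an invented Constant named alpha) returns an (equations, args) pair, the pairs are folded into a dict, and the dict is splatted into Operator.apply(), mirroring self._op.apply(**self._arg_dict) above.

# Sketch only: `toy_equations` and `alpha` are illustrative, not Joey API.
import numpy as np
from devito import Grid, Function, Constant, Eq, Operator

grid = Grid(shape=(4, 4))
a = Function(name='a', grid=grid)
b = Function(name='b', grid=grid)
alpha = Constant(name='alpha')  # value supplied at apply() time


def toy_equations():
    # Analogue of Layer.equations() -> (list, list)
    return [Eq(b, alpha * a)], [('alpha', 2.0)]


a.data[:] = 1.0
eqs, args = toy_equations()
arg_dict = dict(args)            # analogue of self._arg_dict = dict(args)
op = Operator(eqs)
op.cfunction                     # force compilation up front, as base.py does
op.apply(**arg_dict)             # analogue of self._op.apply(**self._arg_dict)
assert np.allclose(b.data, 2.0)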
371 changes: 184 additions & 187 deletions joey/layers.py

Large diffs are not rendered by default.

41 changes: 31 additions & 10 deletions joey/net.py
@@ -11,10 +11,17 @@ class Net:
     def __init__(self, layers: list):
         self._layers = layers
         self._batch_constant = Constant(name='batch', dtype=np.int32)
+        self._forward_arg_dict = {}
+        self._backward_arg_dict = {}

-        eqs = self._gen_eqs()
-        backprop_eqs = self._gen_backprop_eqs()
+        eqs, args = self._gen_eqs()
+        backprop_eqs, backprop_args = self._gen_backprop_eqs()
+
+        for (key, value) in args:
+            self._forward_arg_dict[key] = value
+
+        for (key, value) in backprop_args:
+            self._backward_arg_dict[key] = value

         parameter_lists = list(map(ml.Layer.pytorch_parameters, self._layers))
         parameters = []
@@ -35,23 +42,34 @@ def __init__(self, layers: list):
     def _init_parameters(self):
         for layer in self._layers:
             if layer.kernel is not None:
-                layer.kernel.data[:] = np.random.rand(*layer.kernel.shape) - 0.5
+                layer.kernel.data[:] = \
+                    np.random.rand(*layer.kernel.shape) - 0.5

             if layer.bias is not None:
                 layer.bias.data[:] = np.random.rand(*layer.bias.shape) - 0.5

     def _gen_eqs(self):
         eqs = []
+        args = []

         input_function = None

         for layer in self._layers:
-            eqs += layer.equations(input_function=input_function)
+            if input_function is not None:
+                dims = input_function.dimensions
+                eqs.append(Eq(layer.input[dims], input_function[dims]))
+
+            layer_eqs, layer_args = layer.equations()
+
+            args += layer_args
+            eqs += layer_eqs
             input_function = layer.result

-        return eqs
+        return (eqs, args)

     def _gen_backprop_eqs(self):
         eqs = []
+        args = []

         for i in range(len(self._layers) - 1, -1, -1):
             if i < len(self._layers) - 1:
@@ -64,11 +82,14 @@ def _gen_backprop_eqs(self):
             else:
                 next_layer = None

-            eqs += self._layers[i].backprop_equations(prev_layer, next_layer,
-                                                      self._batch_constant,
-                                                      self._backward_arg_dict)
+            layer_eqs, layer_args = \
+                self._layers[i].backprop_equations(prev_layer, next_layer,
+                                                   self._batch_constant)
+
+            args += layer_args
+            eqs += layer_eqs

-        return eqs
+        return (eqs, args)

     @property
     def pytorch_parameters(self):
@@ -79,7 +100,7 @@ def forward(self, input_data):
             layer.result.data[:] = 0

         self._layers[0].input.data[:] = input_data
-        self._forward_operator.apply()
+        self._forward_operator.apply(**self._forward_arg_dict)
         return self._layers[-1].result.data

     def backward(self, loss_gradient_func, pytorch_optimizer=None):
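To see why _gen_eqs() now inserts Eq(layer.input[dims], input_function[dims]), here is a similarly hedged sketch with two "layers" played by plain Devito Functions (the names r0, in1, r1 are invented): the explicit copy equation chains the stages inside a single Operator, so the whole forward pass is one code-generation unit that Devito can parallelize consistently.

# Sketch only: plain Functions stand in for Joey layers.
import numpy as np
from devito import Grid, Function, Eq, Operator

grid = Grid(shape=(4, 4))
r0 = Function(name='r0', grid=grid)    # previous layer's result
in1 = Function(name='in1', grid=grid)  # next layer's input
r1 = Function(name='r1', grid=grid)    # next layer's result

dims = r0.dimensions
eqs = [
    Eq(in1[dims], r0[dims]),  # analogue of Eq(layer.input[dims], input_function[dims])
    Eq(r1, in1 + 1.0),        # the layer's own computation
]

r0.data[:] = 41.0
Operator(eqs).apply()
assert np.allclose(r1.data, 42.0)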