Commit
remove batch_size, make tensor 2D instead of 3D
JernKunpittaya committed Jun 4, 2024
1 parent bfbd007 commit abee95d
Showing 11 changed files with 64 additions and 68 deletions.
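The gist of the change, as a minimal sketch with toy data (not code from this repository): every column tensor loses its leading batch dimension of 1, so downstream code indexes with two subscripts instead of three.

import torch

column = torch.tensor([3.0, 1.0, 4.0, 1.0, 5.0])
old_style = column.reshape(1, -1, 1)   # (1, n, 1): old layout with a batch_size axis of 1
new_style = column.reshape(-1, 1)      # (n, 1):   new layout after this commit
assert old_style[0][2][0] == new_style[2][0]   # one less level of indexing everywhere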
40 changes: 11 additions & 29 deletions examples/2.torch+state/torch+state.ipynb

Large diffs are not rendered by default.

12 changes: 6 additions & 6 deletions examples/3.state/state.ipynb

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions playground/README.md
@@ -0,0 +1 @@
+Nothing much useful here. Just a playground to quickly test code.
Binary file added playground/example.onnx
Binary file not shown.
Empty file added playground/playground.ipynb
Empty file.
File renamed without changes.
10 changes: 5 additions & 5 deletions tests/test_computation.py
@@ -39,7 +39,7 @@ def nested_computation(state: State, args: list[torch.Tensor]):
out_8 = state.covariance(x, y)
out_9 = state.correlation(y, z)
out_10 = state.linear_regression(x, y)
-slope, intercept = out_10[0][0][0], out_10[0][1][0]
+slope, intercept = out_10[0][0], out_10[1][0]
reshaped = torch.cat((
out_0.unsqueeze(0),
out_1.unsqueeze(0),
@@ -53,7 +53,7 @@ def nested_computation(state: State, args: list[torch.Tensor]):
out_9.unsqueeze(0),
slope.unsqueeze(0),
intercept.unsqueeze(0),
-)).reshape(1,-1,1)
+)).reshape(-1,1)
out_10 = state.mean(reshaped)
return out_10

@@ -125,9 +125,9 @@ def test_nested_computation(tmp_path, column_0: torch.Tensor, column_1: torch.Te
op_10 = ops[10]
assert isinstance(op_10, Regression)
out_10 = statistics.linear_regression(x.tolist(), y.tolist())
-assert op_10.result.shape == (1, 2, 1)
-assert_result(op_10.result[0][0][0], out_10.slope)
-assert_result(op_10.result[0][1][0], out_10.intercept)
+assert op_10.result.shape == ( 2, 1)
+assert_result(op_10.result[0][0], out_10.slope)
+assert_result(op_10.result[1][0], out_10.intercept)

op_11 = ops[11]
assert isinstance(op_11, Mean)
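For orientation, a toy rerun of what nested_computation now does with its intermediate results (illustrative only; the values are made up): the twelve scalars are stacked into a plain (12, 1) column rather than a (1, 12, 1) batch of one.

import torch

outs = [torch.tensor(float(i)) for i in range(12)]                    # stand-ins for out_0 ... intercept
reshaped = torch.cat([o.unsqueeze(0) for o in outs]).reshape(-1, 1)   # was .reshape(1, -1, 1)
assert reshaped.shape == (12, 1)
final_mean = reshaped.mean()                                          # fed to state.mean in the real code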
6 changes: 3 additions & 3 deletions tests/test_ops.py
@@ -51,10 +51,10 @@ def test_linear_regression(tmp_path, column_0: torch.Tensor, column_1: torch.Ten
expected_res = statistics.linear_regression(column_0.tolist(), column_1.tolist())
columns = [column_0, column_1]
regression = Regression.create(columns, error)
-# shape = [1, 2, 1]
+# shape = [2, 1]
actual_res = regression.result
-assert_result(expected_res.slope, actual_res[0][0][0])
-assert_result(expected_res.intercept, actual_res[0][1][0])
+assert_result(expected_res.slope, actual_res[0][0])
+assert_result(expected_res.intercept, actual_res[1][0])
class Model(IModel):
def forward(self, *x: list[torch.Tensor]) -> tuple[IsResultPrecise, torch.Tensor]:
return regression.ezkl(x), regression.result
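A rough NumPy analogue of what the updated test_linear_regression expects (toy columns, not the repo's fixtures): the closed-form fit comes back as a (2, 1) column, so slope and intercept are read with two indices.

import numpy as np
import torch

column_0 = torch.tensor([1.0, 2.0, 3.0, 4.0]).reshape(-1, 1)    # x, shape (n, 1)
column_1 = torch.tensor([2.0, 3.9, 6.1, 8.0]).reshape(-1, 1)    # y, shape (n, 1)
x_one = np.concatenate([column_0.numpy(), np.ones_like(column_0.numpy())], axis=1)
coeffs, *_ = np.linalg.lstsq(x_one, column_1.numpy(), rcond=None)
result = torch.tensor(coeffs, dtype=torch.float32)              # shape (2, 1), was (1, 2, 1)
slope, intercept = result[0][0], result[1][0]                   # the new indexing used in the tests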
16 changes: 12 additions & 4 deletions zkstats/computation.py
@@ -196,14 +196,22 @@ def _call_op(self, x: list[torch.Tensor], op_type: Type[Operation]) -> Union[tor
self.op_dict['Correlation']+=1
elif isinstance(op, Regression):
result_array = []
-for ele in op.result.data[0]:
+for ele in op.result.data:
result_array.append(ele[0].item())
if 'Regression' not in self.op_dict:
self.precal_witness['Regression_0'] = [result_array]
self.op_dict['Regression']=1
else:
self.precal_witness['Regression_'+str(self.op_dict['Regression'])] = [result_array]
self.op_dict['Regression']+=1
+# for ele in op.result.data[0]:
+# result_array.append(ele[0].item())
+# if 'Regression' not in self.op_dict:
+# self.precal_witness['Regression_0'] = [result_array]
+# self.op_dict['Regression']=1
+# else:
+# self.precal_witness['Regression_'+str(self.op_dict['Regression'])] = [result_array]
+# self.op_dict['Regression']+=1
# for verifier
else:
# print('Verifier side create')
@@ -252,13 +260,13 @@ def is_precise() -> IsResultPrecise:
is_precise_aggregated = torch.logical_and(is_precise_aggregated, res)
if self.isProver:
json.dump(self.precal_witness, open(self.precal_witness_path, 'w'))
-return is_precise_aggregated, op.result+(x[0]-x[0])[0][0][0]
+return is_precise_aggregated, op.result+(x[0]-x[0])[0][0]

elif current_op_index > len_ops - 1:
# Sanity check that current op index does not exceed the length of ops
raise Exception(f"current_op_index out of bound: {current_op_index=} > {len_ops=}")
else:
-return op.result+(x[0]-x[0])[0][0][0]
+return op.result+(x[0]-x[0])[0][0]


class IModel(nn.Module):
@@ -302,7 +310,7 @@ def forward(self, *x: list[torch.Tensor]) -> tuple[IsResultPrecise, torch.Tensor
# print('x sy: ')
result = computation(state, x)
if len(result) ==1:
-return x[0][0][0][0]-x[0][0][0][0]+torch.tensor(1.0), result
+return (x[0]-x[0])[0][0]+torch.tensor(1.0), result
else:
return result
# print('state:: ', state.aggregate_witness_path)
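Why the dummy term changes from (x[0]-x[0])[0][0][0] to (x[0]-x[0])[0][0]: with (n, 1) columns, two indexing steps already reach a scalar zero that still depends on the input, which keeps the input tensor in the traced graph. A toy check under those assumptions:

import torch

x = [torch.tensor([[1.0], [2.0], [3.0]])]   # one (n, 1) column, matching the new _process_data output
zero = (x[0] - x[0])[0][0]                   # scalar zero; needed a third [0] when columns were (1, n, 1)
result = torch.tensor(7.5)                   # stand-in for op.result
assert torch.equal(result + zero, result)    # the input-dependent zero leaves the result unchanged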
11 changes: 6 additions & 5 deletions zkstats/core.py
@@ -319,15 +319,15 @@ def _export_onnx(model: Type[IModel], data_tensor_array: list[torch.Tensor], mod
# Flips the neural net into inference mode
circuit.eval()
input_names = []
-dynamic_axes = {}
+# dynamic_axes = {}

data_tensor_tuple = ()
for i in range(len(data_tensor_array)):
data_tensor_tuple += (data_tensor_array[i],)
input_index = "input"+str(i+1)
input_names.append(input_index)
-dynamic_axes[input_index] = {0 : 'batch_size'}
-dynamic_axes["output"] = {0 : 'batch_size'}
+# dynamic_axes[input_index] = {0 : 'batch_size'}
+# dynamic_axes["output"] = {0 : 'batch_size'}

# Export the model
torch.onnx.export(circuit, # model being run
@@ -338,7 +338,8 @@ def _export_onnx(model: Type[IModel], data_tensor_array: list[torch.Tensor], mod
do_constant_folding=True, # whether to execute constant folding for optimization
input_names = input_names, # the model's input names
output_names = ['output'], # the model's output names
-dynamic_axes=dynamic_axes)
+# dynamic_axes=dynamic_axes
+)


# mode is either "accuracy" or "resources"
@@ -436,7 +437,7 @@ def _process_data(
for col in col_array:
data = data_onefile[col]
data_tensor = torch.tensor(data, dtype = torch.float32)
-data_tensor_array.append(torch.reshape(data_tensor, (1,-1,1)))
+data_tensor_array.append(torch.reshape(data_tensor, (-1,1)))
sel_data.append(data)
# Serialize data into file:
# sel_data comes from `data`
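A sketch of the updated data path in core.py, under assumptions (the column dict and TinyModel below are hypothetical, not repo code): each selected column becomes a fixed-shape (n, 1) tensor, so torch.onnx.export no longer needs a dynamic batch_size axis.

import torch
import torch.nn as nn

data_onefile = {"age": [21.0, 34.0, 45.0], "income": [30.0, 52.0, 61.0]}   # hypothetical columns
data_tensor_array = [
    torch.tensor(col, dtype=torch.float32).reshape(-1, 1)    # (n, 1) instead of (1, n, 1)
    for col in data_onefile.values()
]

class TinyModel(nn.Module):
    def forward(self, x1, x2):
        return (x1 + x2).mean()

torch.onnx.export(
    TinyModel(),
    tuple(data_tensor_array),
    "tiny.onnx",
    input_names=["input1", "input2"],
    output_names=["output"],
    # no dynamic_axes: the (n, 1) shapes are baked into the graph, as in the diff above
)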
36 changes: 20 additions & 16 deletions zkstats/ops.py
@@ -53,10 +53,10 @@ def ezkl(self, x: list[torch.Tensor]) -> IsResultPrecise:

def to_1d(x: torch.Tensor) -> torch.Tensor:
x_shape = x.size()
-# Only allows 1d array or [1, len(x), 1]
+# Only allows 1d array or [len(x), 1]
if len(x_shape) == 1:
return x
-elif len(x_shape) == 3 and x_shape[0] == 1 and x_shape[2] == 1:
+elif len(x_shape) == 2 and x_shape[1] == 1:
return x.reshape(-1)
else:
raise Exception(f"Unsupported shape: {x_shape=}")
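Illustrative usage of to_1d after this change (assuming it is importable from zkstats.ops, where it is defined above): 1-D tensors pass through, (n, 1) columns are flattened, and the old (1, n, 1) layout now raises.

import torch
from zkstats.ops import to_1d

print(to_1d(torch.tensor([1.0, 2.0, 3.0])).shape)         # torch.Size([3]): 1-D passes through
print(to_1d(torch.tensor([[1.0], [2.0], [3.0]])).shape)   # torch.Size([3]): (n, 1) flattened
# to_1d(torch.ones(1, 3, 1)) would now raise "Unsupported shape", since (1, n, 1) is no longer allowed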
@@ -97,7 +97,7 @@ def create(cls, x: list[torch.Tensor], error: float, precal_witness:Optional[di

def ezkl(self, x: list[torch.Tensor]) -> IsResultPrecise:
x = x[0]
-old_size = x.size()[1]
+old_size = x.size()[0]
size = torch.sum(torch.where(x!=MagicNumber, 1.0, 0.0))
min_x = torch.min(x)
x = torch.where(x==MagicNumber,min_x-1, x)
@@ -141,7 +141,7 @@ def create(cls, x: list[torch.Tensor], error: float, precal_witness:Optional[di
return cls(torch.tensor(precal_witness['GeometricMean_'+str(op_dict['GeometricMean'])][0]), error)

def ezkl(self, x: list[torch.Tensor]) -> IsResultPrecise:
-# Assume x is [1, n, 1]
+# Assume x is [n, 1]
x = x[0]
size = torch.sum(torch.where(x!=MagicNumber, 1.0, 0.0))
x = torch.where(x==MagicNumber, 1.0, x)
@@ -166,7 +166,7 @@ def create(cls, x: list[torch.Tensor], error: float, precal_witness:Optional[dic


def ezkl(self, x: list[torch.Tensor]) -> IsResultPrecise:
-# Assume x is [1, n, 1]
+# Assume x is [n, 1]
x = x[0]
size = torch.sum(torch.where(x!=MagicNumber, 1.0, 0.0))
return torch.abs((self.result*torch.sum(torch.where(x==MagicNumber, 0.0, torch.div(1.0, x)))) - size)<=torch.abs(self.error*size)
@@ -237,15 +237,15 @@ def create(cls, x: list[torch.Tensor], error: float, precal_witness:Optional[di


def ezkl(self, x: list[torch.Tensor]) -> IsResultPrecise:
-# Assume x is [1, n, 1]
+# Assume x is [n, 1]
x = x[0]
min_x = torch.min(x)
-old_size = x.size()[1]
+old_size = x.size()[0]
x = torch.where(x==MagicNumber, min_x-1, x)
count_equal = torch.sum(torch.where(x==self.result, 1.0, 0.0))

count_check = 0
-for ele in x[0]:
+for ele in x:
bool1 = torch.sum(torch.where(x==ele[0], 1.0, 0.0))<=count_equal
bool2 = ele[0]==min_x-1
count_check += torch.logical_or(bool1, bool2)
@@ -538,16 +538,20 @@ def __init__(self, xs: list[torch.Tensor], y: torch.Tensor, error: float, preca

x_one = stacked_x(x_1ds)
result_1d = np.matmul(np.matmul(np.linalg.inv(np.matmul(x_one.transpose(), x_one)), x_one.transpose()), y_1d)
-result = torch.tensor(result_1d, dtype = torch.float32).reshape(1, -1, 1)
-# print('result: ', result)
+# result = torch.tensor(result_1d, dtype = torch.float32).reshape(1, -1, 1)
+result = torch.tensor(result_1d, dtype = torch.float32).reshape(-1,1)
super().__init__(result, error)
+# print('result regression: ', result)
else:
if op_dict is None:
-result = torch.tensor(precal_witness['Regression_0']).reshape(1,-1,1)
+# result = torch.tensor(precal_witness['Regression_0']).reshape(1,-1,1)
+result = torch.tensor(precal_witness['Regression_0']).reshape(-1,1)
elif 'Regression' not in op_dict:
-result = torch.tensor(precal_witness['Regression_0']).reshape(1,-1,1)
+# result = torch.tensor(precal_witness['Regression_0']).reshape(1,-1,1)
+result = torch.tensor(precal_witness['Regression_0']).reshape(-1,1)
else:
-result = torch.tensor(precal_witness['Regression_'+str(op_dict['Regression'])]).reshape(1,-1,1)
+# result = torch.tensor(precal_witness['Regression_'+str(op_dict['Regression'])]).reshape(1,-1,1)
+result = torch.tensor(precal_witness['Regression_'+str(op_dict['Regression'])]).reshape(-1,1)

# for ele in precal_witness['Regression']:
# precal_witness_arr.append(torch.tensor(ele))
@@ -565,9 +569,9 @@ def ezkl(self, args: list[torch.Tensor]) -> IsResultPrecise:
# infer y from the last parameter
y = args[-1]
y = torch.where(y==MagicNumber,0.0, y)
-x_one = torch.cat((*args[:-1], torch.ones_like(args[0])), dim=2)
-x_one = torch.where((x_one[:,:,0] ==MagicNumber).unsqueeze(-1), torch.tensor([0.0]*x_one.size()[2]), x_one)
-x_t = torch.transpose(x_one, 1, 2)
+x_one = torch.cat((*args[:-1], torch.ones_like(args[0])), dim = 1)
+x_one = torch.where((x_one[:,0] ==MagicNumber).unsqueeze(-1), torch.tensor([0.0]*x_one.size()[1]), x_one)
+x_t = torch.transpose(x_one, 0, 1)

left = x_t @ x_one @ self.result - x_t @ y
right = self.error*x_t @ y
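A toy version of the 2D consistency check above (made-up data, no MagicNumber masking; the final acceptance rule is an assumption, since it falls outside the shown hunk): the design matrix is concatenated along dim=1 and transposed over dims 0 and 1, and the claimed coefficients pass when the normal-equation residual stays within the error bound.

import torch

x = torch.tensor([[1.0], [2.0], [3.0], [4.0]])     # (n, 1) regressor column
y = torch.tensor([[2.0], [4.0], [6.1], [7.9]])     # (n, 1) target column
result = torch.tensor([[1.99], [0.05]])            # claimed [slope, intercept], now shape (2, 1)
error = 0.05

x_one = torch.cat((x, torch.ones_like(x)), dim=1)  # (n, 2) design matrix: dim=1, not dim=2
x_t = torch.transpose(x_one, 0, 1)                 # (2, n): transpose over dims 0 and 1
left = x_t @ x_one @ result - x_t @ y              # normal-equation residual, shape (2, 1)
right = error * (x_t @ y)
is_precise = torch.all(torch.abs(left) <= torch.abs(right))   # assumed acceptance rule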
