Back to Onnxruntime

Building an FC model for MNIST

orttraining/tools/mnist_model_builder/mnist_fc_builder.ipynb

1.25.13.2 KB
Original Source
python
import onnx
from onnx import helper, shape_inference
from onnx import TensorProto
import onnx.optimizer

Building an FC model for MNIST

python
import math
import numpy as np
import scipy.stats as stats

def truncated_normal(dims):
    """Draw dim0*dim1 samples from a normal truncated at +/-2 std devs.

    Mean is 0 and the standard deviation is 1/sqrt(dim0) (fan-in scaling),
    so larger input layers get proportionally smaller initial weights.
    Returns the samples as a flat Python list.
    """
    fan_in, fan_out = dims
    sigma = 1 / math.sqrt(fan_in)
    low, high = -2 * sigma, 2 * sigma
    # truncnorm expects its cut-off points in units of the standard deviation.
    dist = stats.truncnorm(low / sigma, high / sigma, loc=0, scale=sigma)
    return dist.rvs(fan_in * fan_out).tolist()

def zeros(dim):
    """Return a flat list of zeros with one entry per element of shape *dim*.

    Generalized from the original 1-D-only version (which only read dim[0]):
    zeros([128]) still returns 128 zeros, but zeros([2, 3]) now returns 6,
    so any-rank tensors can be zero-initialized. Backward-compatible for the
    1-D bias shapes used in this file.
    """
    total = 1
    for extent in dim:
        total *= extent
    return [0] * total
   
python
# Weight shapes for the three fully-connected layers: 784 -> 128 -> 32 -> 10.
W1_dims = [784, 128]
W2_dims = [128, 32]
W3_dims = [32, 10]

# Weights are initialized from a truncated normal (truncated_normal above).
# Consistency: W1 previously went through onnx.helper.make_tensor while
# W2/W3 used the imported `helper` alias; all three now use the alias.
W1 = helper.make_tensor(name="W1", data_type=onnx.TensorProto.FLOAT, dims=W1_dims, vals=truncated_normal(W1_dims))
W2 = helper.make_tensor(name="W2", data_type=onnx.TensorProto.FLOAT, dims=W2_dims, vals=truncated_normal(W2_dims))
W3 = helper.make_tensor(name="W3", data_type=onnx.TensorProto.FLOAT, dims=W3_dims, vals=truncated_normal(W3_dims))

# Bias shapes match the output width of each layer.
B1_dims = [128]
B2_dims = [32]
B3_dims = [10]

# Biases start at zero.
B1 = helper.make_tensor(name="B1", data_type=onnx.TensorProto.FLOAT, dims=B1_dims, vals=zeros(B1_dims))
B2 = helper.make_tensor(name="B2", data_type=onnx.TensorProto.FLOAT, dims=B2_dims, vals=zeros(B2_dims))
B3 = helper.make_tensor(name="B3", data_type=onnx.TensorProto.FLOAT, dims=B3_dims, vals=zeros(B3_dims))
python
# Forward pass: (X @ W1 + B1) -> ReLU -> (@ W2 + B2) -> ReLU -> (@ W3 + B3).
node1 = helper.make_node('MatMul', ['X', 'W1'], ['T1'])
node2 = helper.make_node('Add', ['T1', 'B1'], ['T2'])
node3 = helper.make_node('Relu', ['T2'], ['T3'])

node4 = helper.make_node('MatMul', ['T3', 'W2'], ['T4'])
node5 = helper.make_node('Add', ['T4', 'B2'], ['T5'])
node6 = helper.make_node('Relu', ['T5'], ['T6'])

node7 = helper.make_node('MatMul', ['T6', 'W3'], ['T7'])
node8 = helper.make_node('Add', ['T7', 'B3'], ['predictions'])

# Value infos for every parameter; this file's convention lists the
# initializers among the graph inputs as well.
_param_infos = [
    helper.make_tensor_value_info(pname, TensorProto.FLOAT, pdims)
    for pname, pdims in [('W1', W1_dims), ('W2', W2_dims), ('W3', W3_dims),
                         ('B1', B1_dims), ('B2', B2_dims), ('B3', B3_dims)]
]

# Batch dimension is dynamic (-1): X is [-1, 784], predictions is [-1, 10].
graph = helper.make_graph(
    nodes=[node1, node2, node3, node4, node5, node6, node7, node8],
    name='fully_connected_mnist',
    inputs=[helper.make_tensor_value_info('X', TensorProto.FLOAT, [-1, 784])] + _param_infos,
    outputs=[helper.make_tensor_value_info('predictions', TensorProto.FLOAT, [-1, 10])],
    initializer=[W1, W2, W3, B1, B2, B3],
)
original_model = helper.make_model(graph, producer_name='onnx-examples')
python
# Validate the freshly-built model against the ONNX spec (raises on failure).
onnx.checker.check_model(original_model)
python
# Run shape inference so intermediate tensor shapes (T1..T7) are recorded
# in the model, then serialize it to disk.
inferred_model = shape_inference.infer_shapes(original_model)
onnx.save_model(inferred_model, "mnist_fc.onnx")

Inference session

python
import onnxruntime as lotus
import numpy as np
python
# NOTE(review): this loads 'mnist_fc_model_with_cost.onnx', NOT the
# 'mnist_fc.onnx' saved above. The run below requests 'loss' and feeds
# 'labels', neither of which exists in the graph built in this notebook,
# so a separately-produced model with a cost node is presumably intended —
# confirm that file exists before running.
sess = lotus.InferenceSession('mnist_fc_model_with_cost.onnx', None)
python
# A single random "image" (uniform noise) with batch size 1.
X_dims = [1, 784]

data = np.random.uniform(size=X_dims).astype(np.float32)
# One-hot label vector marking class 3.
# NOTE(review): labels is rank-1 ([10]); confirm the with-cost model expects
# that rather than a batched [1, 10] shape.
labels = np.zeros(10).astype(np.float32)
labels[3] = 1

# Fetch both the raw predictions and the loss; both outputs (and the
# 'labels' input) only exist in the with-cost model loaded above.
result = sess.run(['predictions', 'loss'], {'X': data, 'labels': labels})

print(result[0])

print(result[1])