21 Building the ONNX Exporter
Let’s implement export from TensorWeaver to ONNX.
21.1 The Exporter Class
import numpy as np
import onnx
from onnx import helper, TensorProto

class ONNXExporter:
    """Export TensorWeaver models to ONNX format."""

    def __init__(self):
        self.nodes = []         # ONNX operations
        self.initializers = []  # Weights
        self.inputs = []        # Model inputs
        self.outputs = []       # Model outputs
        self.counter = 0        # For unique names

    def unique_name(self, prefix="tensor"):
        """Generate a unique tensor name."""
        self.counter += 1
        return f"{prefix}_{self.counter}"
Note
Code Reference: See src/tensorweaver/onnx/onnx_program.py for the full implementation.
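The exporter is a simple builder: each export call appends to these lists, and unique_name avoids name collisions by bumping a shared counter:
exporter = ONNXExporter()
print(exporter.unique_name("matmul_out"))  # matmul_out_1
print(exporter.unique_name("weight"))      # weight_2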
21.2 Adding Initializers (Weights)
def add_initializer(self, data, name=None):
    """Add a weight tensor to the model."""
    name = name or self.unique_name("weight")
    tensor = helper.make_tensor(
        name=name,
        data_type=TensorProto.FLOAT,
        dims=data.shape,
        vals=data.flatten().tolist()
    )
    self.initializers.append(tensor)
    return name
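As a quick illustration, a NumPy array goes in and the registered tensor name comes back (the auto-generated name depends on the counter, so it is illustrative):
w = np.random.randn(2, 3).astype(np.float32)
exporter = ONNXExporter()
print(exporter.add_initializer(w, "demo_weight"))  # demo_weight
print(exporter.add_initializer(w))                 # weight_1 (auto-generated)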
21.3 Exporting Operations
21.3.1 MatMul
def export_matmul(self, input_a, input_b):
    """Export matrix multiplication."""
    output_name = self.unique_name("matmul_out")
    node = helper.make_node(
        'MatMul',
        inputs=[input_a, input_b],
        outputs=[output_name]
    )
    self.nodes.append(node)
    return output_name
Note
Code Reference: See src/tensorweaver/onnx/ops/matmul.py
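Each export method records one node and hands back the name of its output tensor, for example:
exporter = ONNXExporter()
out = exporter.export_matmul("input", "weight_1")
print(out)                        # matmul_out_1
print(exporter.nodes[0].op_type)  # MatMul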
21.3.2 Add
def export_add(self, input_a, input_b):
    """Export addition."""
    output_name = self.unique_name("add_out")
    node = helper.make_node(
        'Add',
        inputs=[input_a, input_b],
        outputs=[output_name]
    )
    self.nodes.append(node)
    return output_name
Note
Code Reference: See src/tensorweaver/onnx/ops/add.py
21.3.3 ReLU
def export_relu(self, input_name):
    """Export ReLU activation."""
    output_name = self.unique_name("relu_out")
    node = helper.make_node(
        'Relu',
        inputs=[input_name],
        outputs=[output_name]
    )
    self.nodes.append(node)
    return output_name
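Because every method returns its output name, operations chain naturally: the output of one node becomes the input of the next. For instance (tensor names here are illustrative):
h = exporter.export_matmul("input", "weight_1")
h = exporter.export_add(h, "bias_1")
h = exporter.export_relu(h)  # relu(input @ W + b)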
21.3.4 Softmax
def export_softmax(self, input_name, axis=-1):
    """Export softmax."""
    output_name = self.unique_name("softmax_out")
    node = helper.make_node(
        'Softmax',
        inputs=[input_name],
        outputs=[output_name],
        axis=axis
    )
    self.nodes.append(node)
    return output_name
21.3.5 LayerNorm
def export_layer_norm(self, input_name, layer):
    """Export a LayerNorm layer."""
    output_name = self.unique_name("layernorm_out")
    # Add scale (gamma) and bias (beta) as initializers
    # (unique names, so multiple LayerNorm layers don't collide)
    scale_name = self.add_initializer(layer.gamma.data, self.unique_name("ln_scale"))
    bias_name = self.add_initializer(layer.beta.data, self.unique_name("ln_bias"))
    # ONNX LayerNormalization op (opset 17+)
    node = helper.make_node(
        'LayerNormalization',
        inputs=[input_name, scale_name, bias_name],
        outputs=[output_name],
        epsilon=layer.eps,
        axis=-1  # Normalize over the last dimension
    )
    self.nodes.append(node)
    return output_name
Tip
LayerNormalization was added in ONNX opset 17. For older opsets, you'd need to decompose it into ReduceMean, Sub, Pow, Add, Sqrt, Div, and Mul operations, as sketched below.
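One possible decomposition looks like this. It is a sketch only, not TensorWeaver's actual code: it reuses the exporter helpers above and assumes an opset below 18, where ReduceMean still takes its axes as an attribute:
def export_layer_norm_decomposed(self, input_name, layer):
    """Sketch only: LayerNorm from primitive ops, for opsets below 17."""
    eps = self.add_initializer(np.array(layer.eps, dtype=np.float32),
                               self.unique_name("ln_eps"))
    two = self.add_initializer(np.array(2.0, dtype=np.float32),
                               self.unique_name("ln_two"))
    scale = self.add_initializer(layer.gamma.data, self.unique_name("ln_scale"))
    bias = self.add_initializer(layer.beta.data, self.unique_name("ln_bias"))

    def emit(op, inputs, **attrs):
        out = self.unique_name(op.lower() + "_out")
        self.nodes.append(helper.make_node(op, inputs, [out], **attrs))
        return out

    mean = emit('ReduceMean', [input_name], axes=[-1], keepdims=1)
    centered = emit('Sub', [input_name, mean])                # x - mean
    var = emit('ReduceMean', [emit('Pow', [centered, two])],  # E[(x - mean)^2]
               axes=[-1], keepdims=1)
    denom = emit('Sqrt', [emit('Add', [var, eps])])           # sqrt(var + eps)
    normed = emit('Div', [centered, denom])
    return emit('Add', [emit('Mul', [normed, scale]), bias])  # gamma * x_hat + beta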
21.4 Exporting a Linear Layer
def export_linear(self, input_name, layer):
    """Export a Linear layer."""
    # Weight is stored as (out_features, in_features); ONNX MatMul
    # computes input @ W, so transpose it. The name must be unique
    # per layer, or two Linear layers would collide.
    weight_name = self.add_initializer(
        layer.weight.data.T,
        self.unique_name("linear_weight")
    )
    # MatMul: input @ weight
    matmul_out = self.export_matmul(input_name, weight_name)
    # Add bias if present
    if layer.bias is not None:
        bias_name = self.add_initializer(
            layer.bias.data, self.unique_name("linear_bias")
        )
        return self.export_add(matmul_out, bias_name)
    return matmul_out
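A quick shape check makes the transpose concrete, assuming TensorWeaver follows the (out_features, in_features) storage convention implied by the .T above:
layer = Linear(4, 16)
print(layer.weight.data.shape)    # (16, 4): (out_features, in_features)
print(layer.weight.data.T.shape)  # (4, 16): so (batch, 4) @ (4, 16) -> (batch, 16)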
21.5 Tracing the Model
Walk through the model structure:
def trace_module(self, module, input_name):
    """Recursively trace a module."""
    if isinstance(module, Linear):
        return self.export_linear(input_name, module)
    elif isinstance(module, Sequential):
        current = input_name
        for child in module._modules.values():
            current = self.trace_module(child, current)
        return current
    elif isinstance(module, ReLU):
        return self.export_relu(input_name)
    elif isinstance(module, LayerNorm):
        return self.export_layer_norm(input_name, module)
    elif isinstance(module, Dropout):
        # Dropout is identity at inference
        return input_name
    else:
        raise ValueError(f"Unsupported module: {type(module)}")
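For example, tracing a small Sequential by hand (a sketch; the exact names and counts assume the methods above and a Linear layer with a bias):
exporter = ONNXExporter()
out = exporter.trace_module(Sequential(Linear(4, 8), ReLU()), "input")
print(out)                  # e.g. relu_out_5
print(len(exporter.nodes))  # 3 nodes: MatMul, Add, Relu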
21.6 Building the Complete Model
def export(self, model, input_shape, output_path):
    """Export model to ONNX file."""
    model.eval()  # Set to inference mode (e.g., disables dropout)
    # Define input
    input_name = "input"
    self.inputs.append(helper.make_tensor_value_info(
        input_name,
        TensorProto.FLOAT,
        input_shape  # e.g., ['batch', 4] for a dynamic batch dimension
    ))
    # Trace model
    output_name = self.trace_module(model, input_name)
    # Define output
    self.outputs.append(helper.make_tensor_value_info(
        output_name,
        TensorProto.FLOAT,
        None  # Shape left unspecified; inferred at load time
    ))
    # Build graph
    graph = helper.make_graph(
        nodes=self.nodes,
        name="tensorweaver_model",
        inputs=self.inputs,
        outputs=self.outputs,
        initializer=self.initializers
    )
    # Build model (opset 17 for LayerNormalization support)
    onnx_model = helper.make_model(
        graph,
        opset_imports=[helper.make_opsetid("", 17)]
    )
    # Validate
    onnx.checker.check_model(onnx_model)
    # Save
    onnx.save(onnx_model, output_path)
    print(f"Exported to {output_path}")
    return onnx_model
21.7 Exporting Our Iris Model
from tensorweaver.nn import Sequential, Linear, ReLU
from tensorweaver.onnx import ONNXExporter

# Trained model
model = Sequential(
    Linear(4, 16),
    ReLU(),
    Linear(16, 3)
)
# ... assume model is trained ...

# Export
exporter = ONNXExporter()
exporter.export(
    model,
    input_shape=['batch', 4],  # Dynamic batch size
    output_path='iris_classifier.onnx'
)
21.8 Handling Dynamic Shapes
For variable batch sizes:
# Static shape
input_shape = [32, 4]  # Fixed batch of 32

# Dynamic batch
input_shape = ['batch', 4]  # Variable batch size

# In the ONNX helper
helper.make_tensor_value_info(
    "input",
    TensorProto.FLOAT,
    ['batch', 4]  # String = dynamic (named) dimension
)
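In the saved model, a dynamic dimension appears as a named dim_param rather than a fixed dim_value, which you can confirm through the onnx protobuf API:
m = onnx.load('iris_classifier.onnx')
dim0 = m.graph.input[0].type.tensor_type.shape.dim[0]
print(dim0.dim_param)  # 'batch'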
21.9 Summary
- ONNXExporter traces the model and builds the ONNX graph
- Each layer type has an export function
- Weights become initializers
- Operations become nodes
- Validate with onnx.checker.check_model()
Next: Running inference with ONNX Runtime.