Using ONNX
Simple example
import numpy
import onnx
import tensorflow
import tf2onnx
from concrete.ml.torch.compile import compile_onnx_model
from concrete.fhe.compilation import Configuration
class FC(tensorflow.keras.Model):
    """A small fully-connected Keras model: Flatten -> Dense(relu) -> Dense(relu6) -> Flatten."""

    def __init__(self):
        super().__init__()
        # Layer widths for the two dense stages.
        hidden_units = 10
        output_units = 5
        self.dense1 = tensorflow.keras.layers.Dense(hidden_units, activation=tensorflow.nn.relu)
        self.dense2 = tensorflow.keras.layers.Dense(output_units, activation=tensorflow.nn.relu6)
        self.flatten = tensorflow.keras.layers.Flatten()

    def call(self, inputs):
        """Forward pass: flatten the input, apply both dense layers, flatten the result."""
        hidden = self.dense1(self.flatten(inputs))
        return self.flatten(self.dense2(hidden))
# Quantization bit-width used at compilation time.
# Fix: the original example declared n_bits = 6 but then hard-coded n_bits=2 in the
# compile call, leaving the variable dead and the example self-contradictory. The
# variable is now the single source of truth, kept at the value actually compiled (2).
n_bits = 2
input_output_feature = 2
input_shape = (input_output_feature,)
num_inputs = 1
n_examples = 5000

# Define the Keras model
keras_model = FC()
keras_model.build((None,) + input_shape)
keras_model.compute_output_shape(input_shape=(None, input_output_feature))

# Create random input (also serves as the calibration set for quantization)
input_set = numpy.random.uniform(-100, 100, size=(n_examples, *input_shape))

# Convert to ONNX and validate the exported graph
tf2onnx.convert.from_keras(keras_model, opset=14, output_path="tmp.model.onnx")
onnx_model = onnx.load("tmp.model.onnx")
onnx.checker.check_model(onnx_model)

# Compile the ONNX model to an FHE-executable quantized module
quantized_module = compile_onnx_model(onnx_model, input_set, n_bits=n_bits)

# Create test data from the same distribution and quantize using
# learned quantization parameters during compilation
x_test = tuple(
    numpy.random.uniform(-100, 100, size=(1, *input_shape)) for _ in range(num_inputs)
)

# Run once in the clear and once in FHE to compare outputs
y_clear = quantized_module.forward(*x_test, fhe="disable")
y_fhe = quantized_module.forward(*x_test, fhe="execute")

print("Execution in clear: ", y_clear)
print("Execution in FHE: ", y_fhe)
print("Equality: ", numpy.sum(y_clear == y_fhe), "over", numpy.size(y_fhe), "values")

Quantization Aware Training
Supported operators
Last updated
Was this helpful?