Key Concepts
import numpy
from concrete.numpy.compilation import compiler

# Let's assume Quantization has been applied and we are left with integers only.
# This is essentially the work of Concrete-ML.

# Some parameters (weight and bias) for our model taking a single feature
w = [2]
b = 2


# The function that implements our model.
# The decorator marks the input "x" as encrypted for FHE compilation.
@compiler({"x": "encrypted"})
def linear_model(x):
    """Return the linear prediction w @ x + b for a single-feature input x."""
    # NOTE: the original snippet had this line unindented (a SyntaxError);
    # it must live inside the function body.
    return w @ x + b


# A representative inputset is needed to compile the function
# (used for tracing). Inputs are 2-bit non-negative integers, one feature each.
n_bits_input = 2
inputset = numpy.arange(0, 2**n_bits_input).reshape(-1, 1)
circuit = linear_model.compile(inputset)

# Use the API to get the maximum bitwidth in the circuit
max_bitwidth = circuit.graph.maximum_integer_bit_width()
print("Max bitwidth = ", max_bitwidth)
# Max bitwidth = 4

# Test our FHE inference: encrypt the input, run homomorphically, decrypt.
circuit.encrypt_run_decrypt(numpy.array([3]))
# 8

# Print the graph of the circuit
print(circuit)
# %0 = 2                  # ClearScalar<uint2>
# %1 = [2]                # ClearTensor<uint2, shape=(1,)>
# %2 = x                  # EncryptedTensor<uint2, shape=(1,)>
# %3 = matmul(%1, %2)     # EncryptedScalar<uint3>
# %4 = add(%3, %0)        # EncryptedScalar<uint4>
# return %4

Quantization
Quantized model accuracy
Limitations for FHE friendly neural networks
Last updated
Was this helpful?