The task is to classify an input vector of shape (12,) into three categories using a quantum neural network. I am following the data re-uploading scheme, modelled on the TensorFlow Quantum tutorial "Parametrized Quantum Circuits for Reinforcement Learning".
There are two different circuits involved, the model circuit and the encoder circuit, which are shown below. The trainable parameters are prefixed with T and the (fixed) input parameters are prefixed with I.
I know the model's input shape should be (12,). But since my circuit has only 3 qubits, I need to change something in the call function to make it work, most likely the tf.einsum.
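For completeness, these are the imports the snippets below assume (aliases matching how the modules are referenced in the code):

import numpy as np
import sympy
import cirq
from cirq.circuits import InsertStrategy
import tensorflow as tf
import tensorflow_quantum as tfq

The ReUploadingPQC layer that performs the re-uploading is implemented as follows.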
class ReUploadingPQC(tf.keras.layers.Layer):
    """
    Performs the transformation (s_1, ..., s_d) -> (theta_1, ..., theta_N, lmbd[1][1]s_1, ..., lmbd[1][M]s_1,
    ......., lmbd[d][1]s_d, ..., lmbd[d][M]s_d) for d=input_dim, N=theta_dim and M=n_layers.
    An activation function from tf.keras.activations, specified by `activation` ('linear' by default) is
    then applied to all lmbd[i][j]s_i.
    All angles are finally permuted to follow the alphabetical order of their symbol names, as processed
    by the ControlledPQC.
    """

    def __init__(self, qubits, n_layers, observables, activation="linear", name="re-uploading_PQC"):
        super(ReUploadingPQC, self).__init__(name=name)
        self.n_layers = n_layers
        self.n_qubits = len(qubits)
        self.observables = observables

        circuit, theta_symbols, input_symbols = generate_circuit(qubits, n_layers)
        self.circuit = circuit

        theta_init = tf.random_uniform_initializer(minval=0.0, maxval=np.pi)
        self.theta = tf.Variable(
            initial_value=theta_init(shape=(1, len(theta_symbols)), dtype="float32"),
            trainable=True, name="thetas"
        )

        lmbd_init = tf.ones(shape=(self.n_qubits * self.n_layers,))
        self.lmbd = tf.Variable(
            initial_value=lmbd_init, dtype="float32", trainable=True, name="lambdas"
        )

        # Define explicit symbol order.
        symbols = [str(symb) for symb in theta_symbols + input_symbols]
        self.indices = tf.constant([symbols.index(a) for a in sorted(symbols)])

        self.activation = activation
        self.empty_circuit = tfq.convert_to_tensor([cirq.Circuit()])
        self.computation_layer = tfq.layers.ControlledPQC(self.circuit, self.observables)

    def call(self, inputs):
        # inputs[0] = encoding data for the state.
        batch_dim = tf.gather(tf.shape(inputs[0]), 0)

        tiled_up_circuits = tf.repeat(self.empty_circuit, repeats=batch_dim)
        tiled_up_thetas = tf.tile(self.theta, multiples=[batch_dim, 1])
        tiled_up_inputs = tf.tile(inputs[0], multiples=[1, self.n_layers])
        scaled_inputs = tf.einsum("i,ji->ji", self.lmbd, tiled_up_inputs)
        squashed_inputs = tf.keras.layers.Activation(self.activation)(scaled_inputs)

        joined_vars = tf.concat([tiled_up_thetas, squashed_inputs], axis=1)
        joined_vars = tf.gather(joined_vars, self.indices, axis=1)

        return self.computation_layer([tiled_up_circuits, joined_vars])
The function generate_circuit() builds the circuit described above and is called when the ReUploadingPQC layer is constructed.
def generate_circuit(qubits, n_layers=1):
    n_qubits = len(qubits)

    # 12 trainable parameters per layer: 4 rotation sub-layers of n_qubits = 3 rotations each.
    params = sympy.symbols(f'T:{n_layers*12}')
    params = np.asarray(params).reshape((n_layers, 4, n_qubits))

    # 12 input parameters per layer, one per input feature.
    inputs = sympy.symbols(f'I:{n_layers*12}')
    inputs = np.asarray(inputs).reshape((n_layers, 12))

    ckt = cirq.Circuit()
    for l in range(n_layers):
        ckt.append(single_rot_ry(params[l, 0, :], qubits), strategy=InsertStrategy.INLINE)
        ckt.append(single_rot_rz(params[l, 1, :], qubits), strategy=InsertStrategy.INLINE)
        ckt.append(do_entangle_cnot(qubits), strategy=InsertStrategy.INLINE)
        ckt.append(get_ckt(inputs[l, :], qubits), strategy=InsertStrategy.INLINE)
        ckt.append(single_rot_ry(params[l, 2, :], qubits), strategy=InsertStrategy.INLINE)
        ckt.append(single_rot_rz(params[l, 3, :], qubits), strategy=InsertStrategy.INLINE)

    return ckt, list(params.flat), list(inputs.flat)
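The helpers single_rot_ry, single_rot_rz, do_entangle_cnot and get_ckt are small wrappers of my own that are not shown above. To keep the question self-contained, here are simplified stand-ins that consume the same number of symbols (the exact gates should not matter for this error, which occurs before any circuit is executed):

# Simplified stand-ins for the helpers used in generate_circuit().
def single_rot_ry(symbols, qubits):
    # One parametrized Ry per qubit (n_qubits symbols per call).
    return [cirq.ry(s)(q) for s, q in zip(symbols, qubits)]

def single_rot_rz(symbols, qubits):
    # One parametrized Rz per qubit (n_qubits symbols per call).
    return [cirq.rz(s)(q) for s, q in zip(symbols, qubits)]

def do_entangle_cnot(qubits):
    # CNOT chain over neighbouring qubits.
    return [cirq.CNOT(q0, q1) for q0, q1 in zip(qubits, qubits[1:])]

def get_ckt(input_symbols, qubits):
    # Encoding layer: 12 input symbols per layer spread over the 3 qubits (4 Rx per qubit).
    return [cirq.rx(s)(qubits[i % len(qubits)]) for i, s in enumerate(input_symbols)]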
When I try to create the model using this class with the code below, I get an error.
def generate_model_policy(qubits, n_layers, n_actions, beta, observables):
    """Generates a Keras model for a data re-uploading PQC policy."""
    input_tensor = tf.keras.Input(shape=(12,), dtype=tf.dtypes.float32, name='input')
    re_uploading_pqc = ReUploadingPQC(qubits, n_layers, observables)([input_tensor])
    process = tf.keras.Sequential([
        Alternating(n_actions),
        tf.keras.layers.Lambda(lambda x: x * beta),
        tf.keras.layers.Dense(3, activation="softmax"),
    ], name="observables-policy")
    policy = process(re_uploading_pqc)
    model = tf.keras.Model(inputs=[input_tensor], outputs=policy)
    return model
qubits = cirq.GridQubit.rect(1, 3)
n_layers = 1
n_actions = 3
observables = [cirq.Z(q) for q in qubits]
model = generate_model_policy(qubits, n_layers, n_actions, 1.0, observables)
The error is:
Exception encountered when calling layer "re-uploading_PQC" (type ReUploadingPQC).
in user code:
File "/tmp/ipykernel_1109761/1735579080.py", line 45, in call *
scaled_inputs = tf.einsum("i,ji->ji", self.lmbd, tiled_up_inputs)
ValueError: Dimensions must be equal, but are 3 and 12 for '{{node re-uploading_PQC/einsum/Einsum}} = Einsum[N=2, T=DT_FLOAT, equation="i,ji->ji"](re-uploading_PQC/einsum/Einsum/ReadVariableOp, re-uploading_PQC/Tile_1)' with input shapes: [3], [?,12].
Call arguments received:
• inputs=['tf.Tensor(shape=(None, 12), dtype=float32)'
So I need to change something in the call function, presumably the tf.einsum and/or the shape of self.lmbd (which has length n_qubits * n_layers = 3, while the tiled inputs have 12 * n_layers columns), but I am not able to work out exactly what to change.
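To make the mismatch concrete, here is a standalone check of the two shapes that collide inside call for n_layers = 1 (the batch size is illustrative, not from my actual data):

import tensorflow as tf

batch_dim, n_qubits, n_layers, input_dim = 5, 3, 1, 12

lmbd = tf.ones(shape=(n_qubits * n_layers,))                # shape (3,)
inputs = tf.random.uniform((batch_dim, input_dim))          # shape (5, 12)
tiled_up_inputs = tf.tile(inputs, multiples=[1, n_layers])  # still (5, 12)

print(lmbd.shape, tiled_up_inputs.shape)  # (3,) vs (5, 12)
# The einsum from call() requires the "i" axes to match, but they are 3 and 12:
# tf.einsum("i,ji->ji", lmbd, tiled_up_inputs)  # raises the ValueError shown above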