Hybrid Quantum Convolutional Neural Networks with Cirq and TensorFlow Quantum
Here we consider a hybrid model in which the data are first processed by a quantum circuit and the resulting outputs are then processed by a classical Convolutional Neural Network. After preparing the cluster state and encoding the data into the circuit, the qubits are processed by quantum convolution and quantum pooling layers in several parallel branches, whose outputs are then fed into a classical CNN that, through backpropagation and gradient descent, gives us a trained model to use for classification.
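Before the full listing, here is a minimal sketch (with illustrative names and a random stand-in image) of the encoding step: every pixel of a 4x4 grayscale image becomes the angle of a ry rotation on its own qubit, which is how the images enter the quantum circuit in the program below.

import cirq
import numpy as np

demo_qubits = cirq.GridQubit.rect(1, 16)        # one qubit per pixel of a 4x4 image
demo_image = np.random.uniform(size=(4, 4))     # stand-in for a rescaled image
demo_angles = demo_image.flatten()
encoding = cirq.Circuit(cirq.ry(demo_angles[i])(q) for i, q in enumerate(demo_qubits))
print(encoding)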
Python program
!pip install cirq
!pip install tensorflow==2.1.0
!pip install tensorflow_quantum

import tensorflow as tf
import tensorflow_quantum as tfq
import cirq
import sympy
import numpy as np
import seaborn as sns
import collections
# visualization tools
%matplotlib inline
import matplotlib.pyplot as plt
from cirq.contrib.svg import SVGCircuit
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os

batch_size = 2000
epochs = 15
IMG_HEIGHT = 4
IMG_WIDTH = 4

URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
path = tf.keras.utils.get_file('cats_and_dogs.zip', origin=URL, extract=True)
PATH = os.path.join(os.path.dirname(path), 'cats_and_dogs_filtered')
percorso = os.path.join(PATH, 'train')
percorso_cats = os.path.join(percorso, 'cats')
percorso_dogs = os.path.join(percorso, 'dogs')
num_cats = len(os.listdir(percorso_cats))
num_dogs = len(os.listdir(percorso_dogs))

total_train = num_cats + num_dogs
# Basic generator that only rescales pixel values to [0, 1]
train_image_generator = ImageDataGenerator(rescale=1./255)
train_data = train_image_generator.flow_from_directory(batch_size=batch_size,
                                                       directory=percorso,
                                                       shuffle=True,
                                                       target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                       class_mode='binary')
esempio_images, _ = next(train_data)

def plotImages(images):
    fig, axes = plt.subplots(1, 5, figsize=(28, 28))
    axes = axes.flatten()
    for img, ax in zip(images, axes):
        ax.imshow(img)
        ax.axis('off')
    plt.tight_layout()
    plt.show()

plotImages(esempio_images[:5])



# Data augmentation: rotation, shift, flip, zoom
image_generata = ImageDataGenerator(
    rescale=1./255,
    rotation_range=45,
    width_shift_range=.15,
    height_shift_range=.15,
    horizontal_flip=True,
    zoom_range=0.5
)

train_image = image_generata.flow_from_directory(batch_size=batch_size,
                                                 directory=percorso,
                                                 shuffle=True,
                                                 target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                 class_mode='binary')
# Load one batch of images and labels
XX, yy = next(train_image)

XX = XX[:, :, :, 0]        # keep a single (grayscale) channel
XX = XX[..., np.newaxis]   # add back an empty channel dimension
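After these two steps each image is a single-channel 4x4 array; as an optional quick check:

print(XX.shape)  # expected: (batch_size, 4, 4, 1)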
def genera_data(x, y):
    """Encode the images into quantum circuits and split them into training and testing data."""
    qubits = cirq.GridQubit.rect(1, 16)
    nr = len(x)  # one circuit per input image
    train = []
    labels = []
    for n in range(nr):
        circuit = cirq.Circuit()
        beta = np.ndarray.flatten(x[n])  # the 16 pixel values of the n-th 4x4 image
        for i, bit in enumerate(qubits):
            circuit += cirq.Circuit(cirq.ry(beta[i])(bit))
        train.append(circuit)
        labels.append(1 if (y[n] == 1) else -1)
    perc = int(len(train) * 0.7)  # 70/30 train/test split
    x_train = train[:perc]
    test = train[perc:]
    train_label = labels[:perc]
    test_label = labels[perc:]
    return tfq.convert_to_tensor(x_train), np.array(train_label), \
           tfq.convert_to_tensor(test), np.array(test_label)

# Generate the data to feed into the model: each image is encoded into a circuit
# and the records are split between training set and test set
tm, tlm, teem, telm = genera_data(XX, yy)
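As an optional sanity check, the shapes of the resulting tensors can be inspected; the exact counts depend on the batch actually loaded above.

print(tm.shape, tlm.shape)     # circuits and labels for training (70%)
print(teem.shape, telm.shape)  # circuits and labels for testing (30%)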

def one_qubit_unitary(bit, symbols):
    """Create a circuit with parameterized rotations about the X, Y and Z axes."""
    return cirq.Circuit(
        cirq.X(bit)**symbols[0],
        cirq.Y(bit)**symbols[1],
        cirq.Z(bit)**symbols[2])

def two_qubit_unitary(bits, symbols):
    """Create a parameterized two-qubit unitary (15 parameters)."""
    circuit = cirq.Circuit()
    circuit += one_qubit_unitary(bits[0], symbols[0:3])
    circuit += one_qubit_unitary(bits[1], symbols[3:6])
    circuit += [cirq.ZZ(*bits)**symbols[6]]
    circuit += [cirq.YY(*bits)**symbols[7]]
    circuit += [cirq.XX(*bits)**symbols[8]]
    circuit += one_qubit_unitary(bits[0], symbols[9:12])
    circuit += one_qubit_unitary(bits[1], symbols[12:])
    return circuit
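To see the structure of a single convolution filter, the two-qubit unitary can be drawn in the same way the pooling circuit is drawn below (illustrative snippet):

testUnitary = cirq.GridQubit.rect(1, 2)
SVGCircuit(two_qubit_unitary(testUnitary, sympy.symbols('u0:15')))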

def two_qubit_pool(source_qubit, out_qubit, symbols):
    """Parameterized pooling circuit that collapses the information of two qubits into one."""
    pool_circuit = cirq.Circuit()
    oqu = one_qubit_unitary(out_qubit, symbols[0:3])
    source_oqu = one_qubit_unitary(source_qubit, symbols[3:6])
    pool_circuit.append(oqu)
    pool_circuit.append(source_oqu)
    pool_circuit.append(cirq.CNOT(control=source_qubit, target=out_qubit))
    pool_circuit.append(oqu**-1)
    return pool_circuit

def quantum_conv_circuit(bits, symbols):
    """Quantum convolution layer: apply the two_qubit_unitary to pairs of neighbouring qubits."""
    circuit = cirq.Circuit()
    for first, second in zip(bits[0::2], bits[1::2]):
        circuit += two_qubit_unitary([first, second], symbols)
    for first, second in zip(bits[1::2], bits[2::2] + [bits[0]]):
        circuit += two_qubit_unitary([first, second], symbols)
    return circuit

def quantum_pool_circuit(source_bits, dest_bits, symbols):
    """Quantum pooling operation: keep the relevant information while going from two qubits to one."""
    circuit = cirq.Circuit()
    for source, dest in zip(source_bits, dest_bits):
        circuit += two_qubit_pool(source, dest, symbols)
    return circuit

testQuantum = cirq.GridQubit.rect(1, 16)

SVGCircuit(
    quantum_pool_circuit(testQuantum[:8], testQuantum[8:], sympy.symbols('x0:6')))




def clusterStateCircuit(qubits):
    """Prepare a ring-shaped cluster state on the given qubits."""
    circuit = cirq.Circuit()
    circuit.append(cirq.H.on_each(qubits))
    for bit1, bit2 in zip(qubits, qubits[1:] + [qubits[0]]):
        circuit.append(cirq.CZ(bit1, bit2))
    return circuit
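As with the other building blocks, the cluster-state circuit can be inspected visually (illustrative snippet; a small 4-qubit register is used here only to keep the drawing compact):

SVGCircuit(clusterStateCircuit(cirq.GridQubit.rect(1, 4)))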

# Custom accuracy metric: map predictions to +1/-1 and compare with the labels.
@tf.function
def custom_accuracy(y_true, y_pred):
    y_true = tf.squeeze(y_true)
    y_pred = tf.map_fn(lambda x: 1.0 if x >= 0 else -1.0, y_pred)
    return tf.keras.backend.mean(tf.keras.backend.equal(y_true, y_pred))



circuitO = cirq.GridQubit.rect(1, 16)
readouts = [cirq.Z(bit) for bit in circuitO[8:]]

def readoutModelCircuit(qubits):
    """Model circuit with a single quantum convolution and a single quantum pooling layer."""
    circuit = cirq.Circuit()
    symbols = sympy.symbols('qconv0:21')
    circuit += quantum_conv_circuit(qubits, symbols[0:15])
    circuit += quantum_pool_circuit(qubits[:8], qubits[8:], symbols[15:21])
    return circuit

inputLayer = tf.keras.Input(shape=(), dtype=tf.dtypes.string)

csm = tfq.layers.AddCircuit()(
    inputLayer, prepend=clusterStateCircuit(circuitO))

# Apply 3 different filters and measure expectation values
qmodel1 = tfq.layers.PQC(
    readoutModelCircuit(circuitO),
    readouts)(csm)
qmodel2 = tfq.layers.PQC(
    readoutModelCircuit(circuitO),
    readouts)(csm)
qmodel3 = tfq.layers.PQC(
    readoutModelCircuit(circuitO),
    readouts)(csm)

# Concatenate outputs and feed them into a small classical NN
qmodel4 = tf.keras.layers.concatenate(
    [qmodel1, qmodel2, qmodel3])

dense1 = tf.keras.layers.Dense(8)(qmodel4)
dense2 = tf.keras.layers.Dense(32)(dense1)
dense3 = tf.keras.layers.Dense(1)(dense2)

model = tf.keras.Model(inputs=[inputLayer],
                       outputs=[dense3])

# Display the model architecture
tf.keras.utils.plot_model(model,
                          show_shapes=True,
                          show_layer_names=True,
                          dpi=70)
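Optionally, a text summary of the layers and trainable parameter counts can also be printed:

model.summary()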


model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0002),
              loss=tf.losses.mse,
              metrics=[custom_accuracy])

# Training of the model
history = model.fit(x=tm,
                    y=tlm,
                    batch_size=64,
                    epochs=5,
                    verbose=1,
                    validation_data=(teem, telm))
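After training, the model can be evaluated on the held-out circuits and its raw outputs turned back into the +1/-1 labels used above (illustrative snippet):

test_loss, test_acc = model.evaluate(teem, telm)
predicted_labels = np.sign(model.predict(teem)).flatten()  # +1 / -1 class predictions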

plt.plot(history.history['loss'], label='Training')
plt.plot(history.history['val_loss'], label='Validation')
plt.title('Training a Hybrid Quantum CNN to Classify Cats and Dogs')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()


plt.plot(history.history['custom_accuracy'][:25], label='Training Hybrid QCNN')
plt.plot(history.history['val_custom_accuracy'][:25], label='Testing Hybrid QCNN')
plt.title('Training a Hybrid Quantum CNN to Classify Cats and Dogs')
plt.xlabel('Epochs')
plt.legend()
plt.ylabel('Accuracy')
plt.show()