Ansatz parameters not getting updated in custom QRNN model (Qiskit)

I'm trying to develop a hybrid qRNN model to predict a cosine wave. I feed in 7 consecutive values of x(t), and the prediction is taken from the output of the 7th step. However, when I call loss.backward() on this predicted output against the actual y value, the ansatz weights do not get updated. I defined the ansatz based on this paper: https://arxiv.org/abs/2302.03244, used EstimatorQNN to define the quantum layer, and wrapped it in a TorchConnector. I want the gradient to be computed only with respect to the 7th step's output, i.e. I loop over the 7 inputs and only call backward() on the output obtained after exiting the loop.
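
Conceptually, this is what I want each training window to do (just a rough sketch of the intent; window, target, model, criterion and optimizer stand in for the objects defined in the full code below):

# rough sketch of one training window: feed x(t-6)..x(t) through the quantum
# layer one step at a time, keep only the last output, and backprop the MSE
for j in range(7):
    out = model(window[j])        # window/model are defined in the code below
loss = criterion(out, target)     # loss only on the 7th step's output
loss.backward()                   # this is where I expect the ansatz weights to get gradients
optimizer.step()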

Here's the code to reproduce my results:

#Create ansatz with this
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from numpy import pi
from qiskit.circuit import Parameter
from qiskit.circuit import ParameterVector
def ansatz_ckt():
    qreg_q = QuantumRegister(6, 'q')
    creg_c = ClassicalRegister(1, 'c')
    params1 = ParameterVector("w",24)
    circuit = QuantumCircuit(qreg_q,creg_c)
    # single-qubit rotation layers: RX, then RZ, then a second RX on every qubit
    circuit.rx(params1[1], qreg_q[0])
    circuit.rx(params1[2], qreg_q[1])
    circuit.rx(params1[3], qreg_q[2])
    circuit.rx(params1[4], qreg_q[3])
    circuit.rx(params1[5], qreg_q[4])
    circuit.rx(params1[6], qreg_q[5])
    circuit.rz(params1[7], qreg_q[0])
    circuit.rz(params1[8], qreg_q[1])
    circuit.rz(params1[9], qreg_q[2])
    circuit.rz(params1[10], qreg_q[3])
    circuit.rz(params1[11], qreg_q[4])
    circuit.rz(params1[12], qreg_q[5])
    circuit.rx(params1[13], qreg_q[0])
    circuit.rx(params1[14], qreg_q[1])
    circuit.rx(params1[15], qreg_q[2])
    circuit.rx(params1[16], qreg_q[3])
    circuit.rx(params1[17], qreg_q[4])
    circuit.rx(params1[18], qreg_q[5])
    # ring of entangling blocks (CX - RZ - CX) between neighbouring qubits,
    # closed from qubit 5 back to qubit 0
    circuit.cx(qreg_q[0], qreg_q[1])
    circuit.rz(params1[19], qreg_q[1])
    circuit.cx(qreg_q[0], qreg_q[1])
    circuit.cx(qreg_q[1], qreg_q[2])
    circuit.rz(params1[20], qreg_q[2])
    circuit.cx(qreg_q[1], qreg_q[2])
    circuit.cx(qreg_q[2], qreg_q[3])
    circuit.rz(params1[21], qreg_q[3])
    circuit.cx(qreg_q[2], qreg_q[3])
    circuit.cx(qreg_q[3], qreg_q[4])
    circuit.rz(params1[22], qreg_q[4])
    circuit.cx(qreg_q[3], qreg_q[4])
    circuit.cx(qreg_q[4], qreg_q[5])
    circuit.rz(params1[23], qreg_q[5])
    circuit.cx(qreg_q[4], qreg_q[5])
    circuit.cx(qreg_q[5], qreg_q[0])
    circuit.rz(params1[0], qreg_q[0])
    circuit.cx(qreg_q[5], qreg_q[0])
    return circuit,params1
#circuit.measure(qreg_q[0], creg_c[0])
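
As a quick sanity check (run separately, not part of the model), the ansatz does expose all 24 trainable parameters:

# sanity check: the ansatz should have 24 unbound weight parameters w[0]..w[23]
ckt, w = ansatz_ckt()
print(ckt.num_parameters)   # 24
print(w)                    # w, ['w[0]', 'w[1]', ..., 'w[23]']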

I then create the circuit here:

import torch
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit.circuit import ParameterVector
from torch.nn import MSELoss
from qiskit.circuit import Parameter

from qiskit_machine_learning.connectors import TorchConnector
from qiskit_machine_learning.neural_networks import EstimatorQNN, SamplerQNN
from qiskit.quantum_info import SparsePauliOp

def MKCircuit(numQubits):
    

    params_i = [Parameter("input1")]
    qreg_q = QuantumRegister(6, 'q')
    creg_c = ClassicalRegister(1, 'c')
    circuit_x = QuantumCircuit(qreg_q)

    # encode the (single) input value as RY rotations on the first three qubits
    circuit_x.ry(params_i[0], qreg_q[0])
    circuit_x.ry(params_i[0], qreg_q[1])
    circuit_x.ry(params_i[0], qreg_q[2])
    #fm = QuantumCircuit(QuantumRegister(numQubits), ClassicalRegister(1))
    #input_parameters = ParameterVector("theta", 2*numQubits)
    #performing the input data encoding as a RY and a RZ for
    #2 bits of data per qubit
    #for i in range(numQubits):
    #    fm.ry(input_parameters[i], i)
    #    fm.rz(input_parameters[numQubits+i], i)
    circuit_x.barrier()
    ansatz,weights = ansatz_ckt()
    #print(ansatz)
    #ansatz.measure(0,0)
    # the ansatz is that upon entangling each qubit to another input parameter
    # the circuit will train like a bounded 5D clock
    #weights = ParameterVector("w", numQubits)
    '''ansatz = QuantumCircuit(QuantumRegister(numQubits+1), ClassicalRegister(1))
    for i in range(numQubits-1):
        ansatz.crx(weights[i], i, i+1)
    #dont forget to close the clock and entangle the last qubit to the first qubit
    ansatz.crx(weights[-1], numQubits-1,0)

    # now if all works as calculated, the result of the
    # 5D clock projected to this ancilla
    # qubit should train to the binary classification of my label map
    ansatz.cx(numQubits-1 , numQubits)
    #Only measure the ancilla qubit
    ansatz.measure(numQubits, 0)'''

    # weights = ParameterVector("w", 4)

    qnn_qc = QuantumCircuit(QuantumRegister(numQubits), ClassicalRegister(1))
    qnn_qc.compose(circuit_x, inplace=True)
    qnn_qc.compose(ansatz, inplace=True)
    
    return qnn_qc, params_i, weights
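
To make sure the feature map and the ansatz compose the way I expect, I also inspected the combined circuit (again just a side check, using the names above):

# side check: 6 qubits, 1 input parameter ("input1") plus 24 weight parameters
qc, inp, w = MKCircuit(6)
print(qc.num_qubits)       # 6
print(qc.num_parameters)   # 25 = 1 input parameter + 24 weights
print(inp)                 # [Parameter(input1)]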

This is the code I used to create the training data:

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
def cos_preprocess():
    x_t = [i for i in range(0,100)]
    rad = 8/199
    y_t = []
    t_idk = []
    for i in x_t:
        t_dash = rad*i
        t_idk.append(t_dash)
        y_t.append(np.cos(np.pi*t_dash)/2)
    #x_t = np.array(x_t)
    #y_t = np.array(y_t)
    #x_t = np.reshape(x_t, newshape = (x_t.shape[0],1))
    #y_t = np.reshape(y_t, newshape = (y_t.shape[0],1))
    #for i in range(x_t.shape[0]):
    #    print(x_t[i][0])

    sc = MinMaxScaler(feature_range = (0, 1))

    X_train = []
    y_train = []
    x_t = np.array(x_t)
    x_t = x_t.reshape(-1,1)
    training_set_scaled = sc.fit_transform(x_t)
    for i in range(7, len(training_set_scaled)):
        X_train.append(training_set_scaled[i-7: i, 0])
        y_train.append(training_set_scaled[i, 0])
    X_train, y_train = np.array(X_train), np.array(y_train)
    X_train = np.reshape(X_train, newshape = (X_train.shape[0], X_train.shape[1], 1))
    return X_train,y_train
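
The resulting shapes look like what I expect for 7-step windows over 100 points:

# shape check: 100 points give 93 sliding windows of length 7
X_train, y_train = cos_preprocess()
print(X_train.shape)   # (93, 7, 1)
print(y_train.shape)   # (93,)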

And finally, here is the code I used to train the model:

import matplotlib.pyplot as plt
def train_custom(qnn_qc, input_parameters, weights):

    #Does this measure the right qubit?
    op = SparsePauliOp.from_list([("IIIIIZ", 1)])
    qnn = EstimatorQNN(
        circuit=qnn_qc,
        input_params=input_parameters,
        weight_params=weights,
        observables=op,
    )
    np.random.seed(42)
    # note: these random initial weights are currently not passed on to TorchConnector
    estimator_qnn_weights = np.random.random(qnn.num_weights)
    #print(estimator_qnn_weights)
    model = TorchConnector(qnn)
    model.train()
    X_train,y_train = cos_preprocess()
    #print(X_train.shape)
    #features = torch.rand((20, 6))
    #labels = torch.randint(0, 2, (20, 2)) * 2. - 1

    optimizer = torch.optim.Adam(model.parameters(), lr=0.03)
    criterion = MSELoss()
    y_predicted = []
    epochs = 1
    total_loss = []
    hybrid_loss_list = []  # Store loss history

    for epoch in range(epochs):
        for i in range(len(X_train)):
            optimizer.zero_grad()
            if i%10==0:
                for name, params in model.named_parameters():
                    print(params)
            # feed the 7-step window through the quantum layer one value at a time;
            # only the last (7th) output is used for the loss below
            for j in range(0, 7):
                output = model(torch.Tensor(X_train[i][j]))

            y_t = torch.tensor(output,requires_grad=True)
            y_idk = torch.Tensor(np.array(y_train[i]))
            y_idk = torch.reshape(y_idk, (-1,))
            #print(f"Predicted: {y_t}, Target: {y_idk}")
            #Save the losses to compare
            y_predicted.append(y_t)
            loss = criterion(y_t, y_idk)
            #print(f"Iteration: {i}, loss: {loss.item()}")
            loss.backward()
            optimizer.step()
            #print(loss)
            #Early stopping condition
            if len(total_loss)>0:
                if loss.item() > total_loss[-1]:
                    break
            total_loss.append(loss.item())
        
        hybrid_loss_list.append(sum(total_loss) / len(total_loss))
        print("Training [{:.0f}%]\tLoss: {:.4f}".format(100.0 * (epoch + 1) / epochs, hybrid_loss_list[-1]))
if __name__ == '__main__':
    
    circuit, input_params, weights = MKCircuit(6)
    print("Weights:",weights)
    train_custom(circuit, input_params, weights)
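
For debugging, I also tried adding a check right after loss.backward() to see whether any gradient reaches the weight parameter at all (a sketch using the same names as in the training loop; it is not included in the run shown below):

# debug sketch: inspect the gradient on the TorchConnector weight parameter
# right after backward(); None would mean no gradient flowed into it
loss.backward()
for name, params in model.named_parameters():
    print(name, params.grad)
optimizer.step()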

When I ran all of the above, my output was:

Weights: w, ['w[0]', 'w[1]', 'w[2]', 'w[3]', 'w[4]', 'w[5]', 'w[6]', 'w[7]', 'w[8]', 'w[9]', 'w[10]', 'w[11]', 'w[12]', 'w[13]', 'w[14]', 'w[15]', 'w[16]', 'w[17]', 'w[18]', 'w[19]', 'w[20]', 'w[21]', 'w[22]', 'w[23]']
Parameter containing:
tensor([ 0.1281,  0.0817,  0.1606,  0.3575, -0.1204, -0.4953, -0.2507, -0.2399,
        -0.1335, -0.6552, -0.7908,  0.8965, -0.3084,  0.9522,  0.8152, -0.8261,
         0.3550, -0.0571,  0.5820, -0.8997,  0.0799, -0.4646, -0.1589,  0.4438],
       requires_grad=True)
/tmp/ipykernel_15258/1790914909.py:46: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
  y_t = torch.tensor(output,requires_grad=True)
Parameter containing:
tensor([ 0.1281,  0.0817,  0.1606,  0.3575, -0.1204, -0.4953, -0.2507, -0.2399,
        -0.1335, -0.6552, -0.7908,  0.8965, -0.3084,  0.9522,  0.8152, -0.8261,
         0.3550, -0.0571,  0.5820, -0.8997,  0.0799, -0.4646, -0.1589,  0.4438],
       requires_grad=True)
Parameter containing:
tensor([ 0.1281,  0.0817,  0.1606,  0.3575, -0.1204, -0.4953, -0.2507, -0.2399,
        -0.1335, -0.6552, -0.7908,  0.8965, -0.3084,  0.9522,  0.8152, -0.8261,
         0.3550, -0.0571,  0.5820, -0.8997,  0.0799, -0.4646, -0.1589,  0.4438],
       requires_grad=True)
Parameter containing:
tensor([ 0.1281,  0.0817,  0.1606,  0.3575, -0.1204, -0.4953, -0.2507, -0.2399,
        -0.1335, -0.6552, -0.7908,  0.8965, -0.3084,  0.9522,  0.8152, -0.8261,
         0.3550, -0.0571,  0.5820, -0.8997,  0.0799, -0.4646, -0.1589,  0.4438],
       requires_grad=True)
Training [100%] Loss: 0.0638

As you can see, the weight parameters of the ansatz do not change at all. The paper also mentions that I only need to measure the first qubit, so I defined the observable as SparsePauliOp.from_list([("IIIIIZ", 1)]), but I'm not sure this is the right way to do it (see the snippet below). Any help on this would be greatly appreciated.
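
For what it's worth, my understanding is that Qiskit Pauli strings are little-endian, i.e. the rightmost character of "IIIIIZ" corresponds to qubit 0, which is what I tried to confirm with this snippet (please correct me if I'm reading it wrong):

# check which qubit the Z acts on: the rightmost character of the Pauli string
# should be qubit 0 in Qiskit's little-endian convention
from qiskit.quantum_info import SparsePauliOp
op = SparsePauliOp.from_list([("IIIIIZ", 1)])
print(op.paulis[0].z)   # [ True False False False False False] -> Z on qubit 0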

Thanks in advance!
