Partial derivative with respect to input in PyTorch

import torch
import numpy as np
import torch.nn as nn

def init_weights(m):
    if isinstance(m, torch.nn.Linear):
        torch.manual_seed(42)
        torch.nn.init.xavier_uniform_(m.weight)
        m.bias.data.fill_(0.1)
class NeuralNetwork(nn.Module):
    def __init__(self, num_features, hidden_size=3, hidden_size2=4):
        super().__init__()

        self.layer1 = nn.Linear(num_features, hidden_size)
        self.acti1 = nn.Tanh()

        self.layer2 = nn.Linear(hidden_size, hidden_size2)
        self.acti2 = nn.Tanh()

        self.output = nn.Linear(hidden_size2, 1)
        self.apply(init_weights)

    def forward(self, x):
        x = self.layer1(x)
        x = self.acti1(x)

        x = self.layer2(x)
        x = self.acti2(x)

        x = self.output(x)
        return x
model = NeuralNetwork(num_features=2)  # X below has 2 features per sample
print(model)

loss_fn = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)

def train(X, y, model, loss_fn, optimizer):
    # Compute prediction error
    pred = model(X)
    loss = loss_fn(pred, y)
    # Backpropagation
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

X = np.arange(20).reshape(10, 2).astype(np.float32)
y = np.random.randint(2, size=(10, 1)).astype(np.float32)
X1 = torch.from_numpy(X)
y1 = torch.from_numpy(y)

X2 = np.arange(20).reshape(10, 2).astype(np.float32)
X_test = torch.from_numpy(X2)

y_pred = model(X_test).detach().numpy()

Here is a simple example of my problem. I would like to calculate the mixed partial derivative $\frac{\partial^2 y_{\text{pred}}}{\partial X[:,0]\,\partial X[:,1]}$ of the network output with respect to the two input features. My plan is to use

[torch.autograd.functional.hessian(model, X_test[i]) for i in range(len(X_test))]
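Spelled out a bit more, this is the kind of thing I have in mind (an untested sketch; I am assuming hessian accepts the model directly because it returns a single-element tensor for one sample, and that the mixed partial is the off-diagonal entry):

# Sketch: per-sample Hessian of the output w.r.t. the 2 input features,
# then read off the mixed partial as the off-diagonal entry.
mixed_partials = []
for i in range(len(X_test)):
    H = torch.autograd.functional.hessian(model, X_test[i])  # shape (2, 2)
    mixed_partials.append(H[0, 1])  # d^2 y_pred / (dX[:,0] dX[:,1])
mixed_partials = torch.stack(mixed_partials)

If I understand it correctly, H[0, 1] and H[1, 0] should agree up to numerical error, since the network is smooth.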

But I am not sure if this would give me the result I want. Could anyone help, please? Another question I would like to ask: what if X.shape[1] is bigger than 2? Then I would need to calculate $\frac{\partial^n y_{\text{pred}}}{\partial X_1 \cdots \partial X_n}$, and in that case the Hessian wouldn't be enough. I was thinking that maybe torch.autograd.grad would work, but I am not sure how to use it.
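For the general case, this is roughly what I was imagining with torch.autograd.grad, but I do not know whether it is the right approach (untested sketch; mixed_partial is just a name I made up, and I am assuming that differentiating repeatedly with create_graph=True gives the higher-order mixed derivative):

def mixed_partial(model, x):
    # x: 1-D tensor with the n features of a single sample
    x = x.clone().requires_grad_(True)
    deriv = model(x).squeeze()  # scalar output for this sample
    for j in range(x.shape[0]):
        # differentiate once with respect to feature j, keeping the graph
        # alive so the next differentiation is possible
        grads = torch.autograd.grad(deriv, x, create_graph=True)[0]
        deriv = grads[j]
    return deriv  # d^n y_pred / (dX_1 ... dX_n)

mixed = torch.stack([mixed_partial(model, X_test[i]) for i in range(len(X_test))])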
