I am trying to apply momentum and weight decay to mini-batch SGD. What is the right way to update my weights? I get weird results as soon as decay is set.
import numpy as np

def _mini_batch(X, y, batch_size):
    # stack bias column, features and targets so one shuffle keeps rows aligned
    rows = len(X)
    X_full = np.hstack((np.ones((rows, 1)), X, np.array(y).reshape(rows, -1)))
    np.random.shuffle(X_full)
    # yield full mini-batches, then the remainder if rows is not a multiple of batch_size
    num_batches = rows // batch_size
    for rng in range(num_batches):
        start_rng, end_rng = rng * batch_size, (rng + 1) * batch_size
        yield X_full[start_rng:end_rng, :-1], X_full[start_rng:end_rng, -1]  # X_batch, y_batch
    if rows % batch_size != 0:
        start_rng = num_batches * batch_size  # avoids an undefined index when rows < batch_size
        yield X_full[start_rng:, :-1], X_full[start_rng:, -1]  # X_batch, y_batch
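To make the question concrete, here is how I exercise the generator on toy data (X_demo and y_demo are just made-up shapes, not my real data); each batch comes back with an extra bias column:

X_demo = np.random.rand(10, 3)
y_demo = np.random.rand(10)
for X_batch, y_batch in _mini_batch(X_demo, y_demo, batch_size=4):
    print(X_batch.shape, y_batch.shape)  # (4, 4) (4,) twice, then the (2, 4) (2,) remainder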
decay_rate = 0.2
alpha = 0.1  # learning rate
weights = np.random.normal(size=X.shape[1] + 1)  # +1 for the bias column added in _mini_batch
for i in range(epochs):
    # update the weights once per mini-batch
    for X_batch, y_batch in _mini_batch(X, y, batch_size):
        train_predictions = np.dot(X_batch, weights)  # y_hat
        errors = train_predictions - y_batch
        # is this the right place/form for the decay term?
        weights = (1. - decay_rate) * weights - alpha * np.dot(X_batch.T, errors) / len(X_batch)
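For comparison, this is my understanding of how momentum and decay are usually combined: keep a velocity buffer across batches and fold the decay into the gradient as an L2 term. The momentum coefficient of 0.9 and the velocity buffer are my own additions, not part of the code above; is this the update I should be using instead?

momentum = 0.9  # assumed value, not from my code
velocity = np.zeros_like(weights)
for i in range(epochs):
    for X_batch, y_batch in _mini_batch(X, y, batch_size):
        errors = np.dot(X_batch, weights) - y_batch
        # L2 decay folded into the gradient instead of scaling the weights directly
        grad = np.dot(X_batch.T, errors) / len(X_batch) + decay_rate * weights
        velocity = momentum * velocity - alpha * grad
        weights = weights + velocity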