I am trying to implement a custom ArcFace loss function in TensorFlow:
def call(self, y_true: FloatTensor, y_pred: FloatTensor) -> FloatTensor:
    projector = tf.math.l2_normalize(y_true, axis=1)
    predictor = tf.math.l2_normalize(y_pred, axis=1)
    cos_theta = tf.matmul(projector, predictor)
    cos_theta = tf.clip_by_value(cos_theta, -1.0, 1.0)
    index = tf.where(self.num_classes != -1)[0]
    m_hot = tf.zeros(index.size()[0], cos_theta.size()[1])
    m_hot.scatter_(1, self.num_classes[index, None], self.margin)
    cos_theta = tf.acos(cos_theta)
    cos_theta[index] += m_hot
    cos_theta = cos_theta.cos()
    cos_theta = tf.math.multiply(cos_theta, self.scale)
    loss: FloatTensor = tf.nn.softmax(cos_theta, dim=1)
    return loss
But I am getting the following error:
TypeError: Expected uint8 passed to parameter 'y' of op 'Maximum', got 1e-12 of type 'float' instead. Error: Expected uint8, but got 1e-12 of type 'float'.
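The TypeError is raised by `tf.math.l2_normalize(y_true, axis=1)`. `y_true` contains integer class labels (`uint8` in this case), and `l2_normalize` internally evaluates `maximum(reduce_sum(square(x)), epsilon)` with a float epsilon of `1e-12`; the `Maximum` op cannot mix a `uint8` tensor with that float scalar, which produces exactly the message above. Class labels should not be L2-normalized at all.

Beyond the dtype issue, the body mixes PyTorch and TensorFlow idioms: TF tensors have no `.size()`, `.scatter_()`, or `.cos()` methods, in-place indexed assignment such as `cos_theta[index] += m_hot` is not supported, `tf.nn.softmax` takes `axis=` rather than `dim=`, and a bare softmax is not a loss by itself; ArcFace applies cross-entropy to the margin-adjusted, scaled logits. The line `tf.where(self.num_classes != -1)` also appears to compare a scalar attribute, where the PyTorch code this seems to be ported from tests the label tensor.

Below is a minimal sketch of a pure-TensorFlow rewrite, assuming `y_true` holds integer class labels and `y_pred` already holds the cosine similarities between L2-normalized embeddings and L2-normalized class weights (that matmul normally lives in the model's final layer, since a Keras loss only ever receives `y_true` and `y_pred`). The class name and the `num_classes`, `margin`, and `scale` attributes are taken from the snippet above; treat this as a starting point, not a verified drop-in:

import tensorflow as tf

class ArcFaceLoss(tf.keras.losses.Loss):
    def __init__(self, num_classes: int, margin: float = 0.5,
                 scale: float = 64.0, **kwargs):
        super().__init__(**kwargs)
        self.num_classes = num_classes
        self.margin = margin
        self.scale = scale

    def call(self, y_true, y_pred):
        # y_true: integer class labels, shape (batch,) or (batch, 1).
        # y_pred: cosine similarities, shape (batch, num_classes).
        labels = tf.cast(tf.reshape(y_true, [-1]), tf.int32)
        # Clip away from +/-1 so acos stays finite and differentiable.
        cos_theta = tf.clip_by_value(y_pred, -1.0 + 1e-7, 1.0 - 1e-7)
        theta = tf.acos(cos_theta)
        # Add the angular margin only at each sample's target class;
        # this replaces the PyTorch scatter_/in-place indexing above.
        one_hot = tf.one_hot(labels, depth=self.num_classes,
                             dtype=theta.dtype)
        logits = tf.cos(theta + one_hot * self.margin) * self.scale
        # Cross-entropy over the scaled logits, not a bare softmax.
        return tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=labels, logits=logits)

If the normalization from the original snippet is still needed, it belongs in the layer that produces `y_pred`, e.g. `tf.matmul(tf.math.l2_normalize(emb, axis=1), tf.math.l2_normalize(W, axis=0))`, where `emb` and `W` are hypothetical names for the embedding batch and the trainable class-weight matrix. With labels kept out of float-only ops, the `Maximum` dtype error disappears.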