I'd like to implement a custom loss for a RaGAN network in Keras that follows this:
Discriminator Loss:

$$L_D = -\mathbb{E}_{x_r}\big[\log\sigma\big(D(x_r) - \mathbb{E}_{x_f}[D(x_f)]\big)\big] - \mathbb{E}_{x_f}\big[\log\big(1 - \sigma\big(D(x_f) - \mathbb{E}_{x_r}[D(x_r)]\big)\big)\big]$$

Generator Loss:

$$L_G = -\mathbb{E}_{x_f}\big[\log\sigma\big(D(x_f) - \mathbb{E}_{x_r}[D(x_r)]\big)\big] - \mathbb{E}_{x_r}\big[\log\big(1 - \sigma\big(D(x_r) - \mathbb{E}_{x_f}[D(x_f)]\big)\big)\big]$$
This is what I have so far, but the discriminator's output is always constant during training. There is probably something wrong with the implementation, but I'm not sure what.
# Build the shared RaGAN graph: one real branch and one generated (fake)
# branch through the same discriminator.
# NOTE(review): assumes `X` is the training-image array, and `generator` /
# `discriminator` are already-built Keras models — not shown here.
Real_image = Input(shape=X.shape[1:])
Noise_input = Input(shape=(32,))
Fake_image = generator(Noise_input)
Discriminator_real_out = discriminator(Real_image)
Discriminator_fake_out = discriminator(Fake_image)
# Batch-averaged critic scores (axis=0 is the batch axis); these are the
# E[D(x)] terms of the relativistic average formulation.
Discriminator_fake_average_out = K.mean(Discriminator_fake_out, axis=0)
Discriminator_real_average_out = K.mean(Discriminator_real_out, axis=0)
# D(x_r) - E[D(x_f)]: how much more realistic a real sample looks than the
# average fake, and vice versa. Both are closed over by the loss functions
# below, so their names must not change.
Real_Fake_relativistic_average_out = Discriminator_real_out - Discriminator_fake_average_out
Fake_Real_relativistic_average_out = Discriminator_fake_out - Discriminator_real_average_out
def relativistic_discriminator_loss(y_true, y_pred):
    """Relativistic average (RaGAN) discriminator loss.

    L_D = -E[log sigmoid(D(x_r) - E[D(x_f)])]
          -E[log(1 - sigmoid(D(x_f) - E[D(x_r)]))]

    NOTE(review): `y_true` and `y_pred` are deliberately ignored — the loss
    is built entirely from the closed-over graph tensors. This only trains
    correctly if the model this loss is compiled on actually contains both
    the real and fake branches and the right weights are trainable/frozen;
    a constant discriminator output is the classic symptom of compiling it
    on a model where these tensors are not connected to the trained weights.
    """
    # Numerically stable identities (no epsilon hack needed):
    #   -log(sigmoid(x))     == softplus(-x)
    #   -log(1 - sigmoid(x)) == softplus(x)
    # The original K.log(K.sigmoid(x) + epsilon) saturates in float32 for
    # |x| ~ 20+, killing gradients and biasing the loss by epsilon.
    real_term = K.mean(K.softplus(-Real_Fake_relativistic_average_out), axis=0)
    fake_term = K.mean(K.softplus(Fake_Real_relativistic_average_out), axis=0)
    return real_term + fake_term
def relativistic_generator_loss(y_true, y_pred):
    """Relativistic average (RaGAN) generator loss plus a pixel content loss.

    Adversarial part (labels swapped relative to the discriminator loss):
    L_G = -E[log sigmoid(D(x_f) - E[D(x_r)])]
          -E[log(1 - sigmoid(D(x_r) - E[D(x_f)]))]

    NOTE(review): `y_true`/`y_pred` are ignored; the loss closes over the
    shared graph tensors defined at module level.
    """
    # Stable form of -log(sigmoid(x)) / -log(1 - sigmoid(x)) — see the
    # discriminator loss; avoids the epsilon hack and saturation.
    adversarial_loss = (
        K.mean(K.softplus(-Fake_Real_relativistic_average_out), axis=0)
        + K.mean(K.softplus(Real_Fake_relativistic_average_out), axis=0)
    )
    # Content loss: average of MSE and L1 between real and generated images.
    # NOTE(review): axis=-1 reduces only the channel axis, leaving a
    # per-pixel (batch, H, W) map, while the adversarial term is reduced over
    # the batch — the two are combined by broadcasting. If a scalar content
    # loss is intended, reduce over all non-batch axes instead — TODO confirm.
    mse_loss = K.mean(K.square(Real_image - Fake_image), axis=-1)
    l1_loss = K.mean(K.abs(Real_image - Fake_image), axis=-1)
    content_loss = (mse_loss + l1_loss) / 2
    # lambda_val weights the adversarial term against the content term
    # (defined elsewhere in the script).
    return content_loss + lambda_val * adversarial_loss
If anyone could help with this, that would be great.