GANs for returns simulations

60 Views Asked by At

I am new to Python, though I have managed to understand quite a few things. I have been reading about GANs and their use in finance to simulate stock returns, which yields a more precise simulated distribution than the Monte Carlo one. I have struggled to code a model that works. Below is the code:

from keras.layers import Input, Dense
from keras.layers.advanced_activations import LeakyReLU
from keras.models import Sequential, Model
from keras.optimizers import Adam
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import yfinance as yf

# Download GM's full adjusted-close price history and convert it to
# daily log returns.
gm = yf.download('GM',period='MAX')['Adj Close']
ret = np.log(gm/gm.shift()).dropna()
# Window length of each training sample fed to the GAN.
length = 25
# NOTE(review): the scaler is fit here but ret_scaled is never used below
# (create_samples is called on the raw `ret` at the bottom of the script).
# Given the generator's tanh output, the training data presumably should
# be scaled — to [-1, 1], not MinMaxScaler's default [0, 1] — confirm.
scaler = MinMaxScaler()
ret_scaled = scaler.fit_transform(ret[:,np.newaxis])

def create_samples(dataset, window=None):
    """Slice *dataset* into overlapping rolling windows.

    Parameters
    ----------
    dataset : array-like or pandas Series
        Series to window; positional slicing (``dataset[i:i+window]``)
        is used.
    window : int, optional
        Window length. Defaults to the module-level ``length`` so that
        existing single-argument callers behave exactly as before.

    Returns
    -------
    np.ndarray
        Array of shape ``(len(dataset) - window, window)``; consecutive
        rows overlap by ``window - 1`` elements.
    """
    if window is None:
        # Backward-compatible fallback to the module-level setting.
        window = length
    return np.array([dataset[i:i + window]
                     for i in range(len(dataset) - window)])

# Training matrix of shape (n_windows, 25), built from the RAW returns.
# NOTE(review): `ret_scaled` defined above is never used — presumably this
# was meant to be create_samples(ret_scaled) (after flattening); verify,
# since the generator's tanh output implies scaled targets.
samples = create_samples(ret)

# Shared optimizer for all models; Adam(0.0002, 0.5) — per the Keras
# signature these are the learning rate and beta_1 (the usual DCGAN
# settings).  noise_shape is the generator's latent input dimensionality.
optimizer = Adam(0.0002, 0.5)
noise_shape = (100,) 


def build_generator():
    """Build the generator model.

    Maps a 100-dim Gaussian noise vector (``noise_shape``) through an
    expanding stack of LeakyReLU Dense layers to a 25-step return
    sequence squashed into (-1, 1) by tanh.

    NOTE(review): the tanh output range assumes training data scaled to
    [-1, 1]; the script trains on unscaled returns — confirm the
    intended scaling.

    Returns
    -------
    keras Sequential model (uncompiled).
    """
    generator = Sequential()
    generator.add(Dense(256, input_shape=noise_shape))
    generator.add(LeakyReLU(alpha=0.2))
    generator.add(Dense(512))
    generator.add(LeakyReLU(alpha=0.2))
    generator.add(Dense(1024))
    generator.add(LeakyReLU(alpha=0.2))
    generator.add(Dense(2048))
    generator.add(LeakyReLU(alpha=0.2))
    generator.add(Dense(25, activation='tanh'))
    # Fix: the original compiled the generator standalone, but it is only
    # ever trained through the combined model built in build_gan(), so
    # that compile was dead configuration.  predict() does not require a
    # compiled model.

    return generator

def build_discriminator():
    """Build the discriminator model.

    Scores a 25-step return window with a sigmoid probability of being
    a real (historical) sample rather than a generated one.

    Returns
    -------
    keras Sequential model, compiled with binary cross-entropy.
    """
    discriminator = Sequential()
    # Contracting stack of LeakyReLU Dense layers: 1024 -> 512 -> 256.
    discriminator.add(Dense(1024, input_dim=25))
    discriminator.add(LeakyReLU(alpha=0.2))
    for units in (512, 256):
        discriminator.add(Dense(units))
        discriminator.add(LeakyReLU(alpha=0.2))
    # Single sigmoid unit: P(sample is real).
    discriminator.add(Dense(1, activation='sigmoid'))
    discriminator.compile(loss='binary_crossentropy', optimizer=optimizer)
    return discriminator

def build_gan(discriminator, generator):
    """Stack generator -> (frozen) discriminator into the combined model
    used to train the generator.

    Parameters
    ----------
    discriminator, generator : keras models from the builders above.

    Returns
    -------
    keras Model mapping 100-dim noise to the discriminator's score,
    compiled with binary cross-entropy.
    """
    # Freeze the discriminator so generator updates through this model
    # never touch its weights.
    discriminator.trainable = False
    noise_in = Input(shape=(100,))
    fake_sample = generator(noise_in)
    validity = discriminator(fake_sample)
    combined = Model(inputs=noise_in, outputs=validity)
    combined.compile(loss='binary_crossentropy', optimizer=optimizer)
    return combined

def train(epochs=1, batch_size=10):
    """Run the adversarial training loop.

    Each epoch trains the discriminator on one real mini-batch (label 1)
    and one generated mini-batch (label 0), then trains the generator —
    through the frozen-discriminator combined model — on one noise batch
    labelled 1.

    Parameters
    ----------
    epochs : int
        Number of iterations (one mini-batch of each kind per iteration).
    batch_size : int
        Samples per mini-batch.

    Returns
    -------
    tuple of three np.ndarray
        (real-loss log, fake-loss log, generator-loss log); each row is
        ``[epoch, loss]``.
    """
    X_train = samples
    # Fix: the original computed an unused (and float-valued) batch_count,
    # and an averaged d_loss that was never used; both removed as dead code.

    d_loss_logs_r = []
    d_loss_logs_f = []
    g_loss_logs = []

    generator = build_generator()
    discriminator = build_discriminator()
    gan = build_gan(discriminator, generator)

    for epoch in range(epochs):
        # --- discriminator step -------------------------------------
        noise = np.random.normal(0, 1, [batch_size, 100])
        gen_imgs = generator.predict(noise)
        y_fake = np.zeros(batch_size)

        idx = np.random.randint(0, X_train.shape[0], batch_size)
        image_batch = X_train[idx]
        y_real = np.ones(batch_size)

        discriminator.trainable = True
        d_loss_real = discriminator.train_on_batch(image_batch, y_real)
        d_loss_fake = discriminator.train_on_batch(gen_imgs, y_fake)

        # --- generator step: push discriminator's score toward 1 ----
        noise = np.random.normal(0, 1, [batch_size, 100])
        y_gen = np.ones(batch_size)
        discriminator.trainable = False
        g_loss = gan.train_on_batch(noise, y_gen)

        d_loss_logs_r.append([epoch, d_loss_real])
        d_loss_logs_f.append([epoch, d_loss_fake])
        g_loss_logs.append([epoch, g_loss])

    # NOTE(review): to obtain simulated return paths after training
    # (the question asked in the surrounding text), call
    #   generator.predict(np.random.normal(0, 1, [n_paths, 100]))
    # and invert whatever scaling was applied to the training data.
    d_loss_logs_r_a = np.array(d_loss_logs_r)
    d_loss_logs_f_a = np.array(d_loss_logs_f)
    g_loss_logs_a = np.array(g_loss_logs)

    return d_loss_logs_r_a, d_loss_logs_f_a, g_loss_logs_a

When I train the model it only yields the loss values for the discriminator and the generator, but how can I also get the simulated returns (so I can use them instead of the Monte Carlo ones)?

Many thanks

0

There are 0 best solutions below