import pandas as pd
import numpy as np
from sklearn.datasets import load_boston

boston = load_boston()
boston_data = pd.DataFrame(boston.data)
boston_data.columns = boston.feature_names
boston_data['PRICE'] = boston.target  # dependent variable: the value to be predicted
# For the current dataset, apply a log transformation to reduce the effect of outliers, then min-max normalise
boston_data_log = np.log1p(boston_data)
boston_data_log = (boston_data_log - boston_data_log.min()) / (boston_data_log.max() - boston_data_log.min())
boston_data_log['PRICE'] = boston_data['PRICE']  # restore the untransformed target; only the features stay transformed
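A note on the first import: load_boston was deprecated in scikit-learn 1.0 and removed in 1.2, so it fails on recent versions. If that happens, one possible workaround (an assumption on my part: this fetches the OpenML mirror of the same dataset and needs network access; the dtype casts may need adjusting) is:

# Assumed alternative loader for scikit-learn >= 1.2, via OpenML.
from sklearn.datasets import fetch_openml

boston_frame = fetch_openml(name="boston", version=1, as_frame=True)
boston_data = boston_frame.data.astype(float)      # some columns arrive as categoricals
boston_data['PRICE'] = boston_frame.target.astype(float)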
## Building an ANN
# Using the log-transformed and normalised dataset
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(
    boston_data_log.drop(columns=['PRICE']).values,
    boston_data_log['PRICE'].values,
    test_size=0.2,
)

# Convert the numpy arrays to float tensors for PyTorch
x_train = torch.FloatTensor(x_train)
x_test = torch.FloatTensor(x_test)
y_train = torch.FloatTensor(y_train)
y_test = torch.FloatTensor(y_test)
class ANN(nn.Module):
    def __init__(self, input_layer, hidden_layer_1, hidden_layer_2, hidden_layer_3, hidden_layer_4, hidden_layer_5, output_layer):
        super().__init__()
        self.fully_connected_1 = nn.Linear(input_layer, hidden_layer_1)
        self.fully_connected_2 = nn.Linear(hidden_layer_1, hidden_layer_2)
        self.fully_connected_3 = nn.Linear(hidden_layer_2, hidden_layer_3)
        self.fully_connected_4 = nn.Linear(hidden_layer_3, hidden_layer_4)
        self.fully_connected_5 = nn.Linear(hidden_layer_4, hidden_layer_5)
        self.output_layer = nn.Linear(hidden_layer_5, output_layer)

    def forward_prop(self, x):
        x = F.relu(self.fully_connected_1(x))
        x = F.relu(self.fully_connected_2(x))
        x = F.relu(self.fully_connected_3(x))
        x = F.relu(self.fully_connected_4(x))
        x = F.relu(self.fully_connected_5(x))
        x = self.output_layer(x)
        return x
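A side note on the naming: nn.Module's convention is to put the computation in a method called forward, which nn.Module.__call__ dispatches to, so the model can be invoked as model(x) and registered hooks fire. The custom forward_prop name works here only because it is always called explicitly. A minimal sketch of the idiomatic form (ANNIdiomatic is a hypothetical name, reusing the torch imports above):

# Idiomatic nn.Module style: define `forward` so that `model(x)` works.
class ANNIdiomatic(nn.Module):
    def __init__(self, n_in, n_hidden, n_out):
        super().__init__()
        self.hidden = nn.Linear(n_in, n_hidden)
        self.out = nn.Linear(n_hidden, n_out)

    def forward(self, x):
        return self.out(F.relu(self.hidden(x)))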
torch.manual_seed(20)
model = ANN(x_train.shape[1], 200, 200, 200, 200, 200, 1)
loss_function = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

epochs = 500
loss_cumul_list = []  # training loss recorded per epoch
loop_list = []  # epoch indices, kept for plotting
for i in range(epochs):
    y_pred = model.forward_prop(x_train)  # full-batch forward pass
    loss = loss_function(y_pred, y_train)
    loss_cumul_list.append(loss.item())
    loop_list.append(i + 1)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
loss_cumul_list  # notebook-style cell output: inspect the recorded losses
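One thing I have been checking (not sure if it is the culprit): model.forward_prop(x_train) returns shape (N, 1) while y_train has shape (N,), and nn.MSELoss broadcasts that pair to (N, N) instead of comparing element-wise; PyTorch prints a UserWarning about the mismatched target size when this happens. A minimal shape check, plus a possible fix under the assumption that this is the issue:

# Diagnostic: the two shapes should match for an element-wise MSE.
print(model.forward_prop(x_train).shape, y_train.shape)  # e.g. torch.Size([404, 1]) vs torch.Size([404])

# Possible fix: give the target an explicit feature dimension
# so MSELoss compares element-wise instead of broadcasting.
loss = loss_function(model.forward_prop(x_train), y_train.unsqueeze(1))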
predictions = []
with torch.no_grad():  # no gradients needed at evaluation time
    for i, data in enumerate(x_test):
        y_pred = model.forward_prop(data)
        predictions.append(y_pred.item())
from sklearn.metrics import r2_score,mean_squared_error,mean_absolute_error
error_r2=r2_score(y_test,predictions)
error_mse=mean_squared_error(y_test,predictions)
error_mae=mean_absolute_error(y_test,predictions)
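Incidentally, the per-row loop above can be replaced by a single batched pass, since the whole test set fits in one tensor. A sketch of the equivalent evaluation:

# Batched evaluation: one forward pass over the full test set, then
# squeeze the (N, 1) output back to 1-D for the sklearn metrics.
with torch.no_grad():
    predictions = model.forward_prop(x_test).squeeze(1).numpy()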
I have been trying to apply an ANN to the Boston house price dataset using PyTorch, but after about 50 epochs the loss barely decreases any further. I have seen similar notebooks online using TensorFlow, and the models train fine there. I am unable to understand the problem here.
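For reference, this is how I am looking at the loss curve, using the per-epoch losses recorded above (assuming matplotlib is available; the plotting is not part of the training itself):

import matplotlib.pyplot as plt

# Plot the recorded per-epoch training loss to see where it flattens out.
plt.plot(loop_list, loss_cumul_list)
plt.xlabel('epoch')
plt.ylabel('training MSE loss')
plt.show()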