class ConvolutionalNetwork(nn.Module):
    def __init__(self, in_features, trial):
        # we optimize the number of layers, hidden units and dropout ratio in each layer.
        n_layers = self.trial.suggest_int("n_layers", 1, 5)
        p = self.trial.suggest_uniform("dropout_1{}".format(i), 0, 1.0)
        layers = []
        for i in range(n_layers):
            self.out_features = self.trial.suggest_int("n_units_1{}".format(i), 16, 160, step=2)
            kernel_size = trial.suggest_int('kernel_size', 2, 7)
            layers.append(nn.Conv1d(1, self.out_features, kernel_size, 1))
            layers.append(nn.RReLU())
            layers.append(nn.BatchNorm1d(self.out_features))
            layers.append(nn.Dropout(p))
            self.in_features = self.out_features
        layers.append(nn.Conv1d(self.in_features, 16, kernel_size, 1))
        layers.append(nn.RReLU())
        return nn.Sequential(*layers)
As you can see above, I did some Optuna tuning of the parameters, including tuning the number of layers.
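For reference, the study driving this looks roughly as follows. This is only a minimal sketch: train_loader, evaluate, the input width, and the epoch count are placeholders, not my actual setup:

    import optuna
    import torch
    import torch.nn.functional as F

    def objective(trial):
        # placeholder input width; the trial object supplies the tuned hyperparameters
        model = ConvolutionalNetwork(in_features=1024, trial=trial)
        lr = trial.suggest_float("lr", 1e-4, 1e-1, log=True)
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)
        for epoch in range(10):               # placeholder epoch count
            for xb, yb in train_loader:       # placeholder DataLoader
                optimizer.zero_grad()
                loss = F.nll_loss(model(xb), yb)  # NLL pairs with the final log_softmax
                loss.backward()
                optimizer.step()
        return evaluate(model)                # placeholder validation metric

    study = optuna.create_study(direction="maximize")
    study.optimize(objective, n_trials=50)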
    def forward(self, x):
        # shape x for conv 1d op
        x = x.view(-1, 1, self.in_features)
        x = self.conv1(x)
        x = F.rrelu(x)
        x = F.max_pool1d(x, 64, 64)
        x = self.conv2(x)
        x = F.rrelu(x)
        x = F.max_pool1d(x, 64, 64)
        x = x.view(-1, self.n_conv)
        x = self.dp(x)
        x = self.fc3(x)
        x = F.log_softmax(x, dim=1)
        return x
I now need to do the same for the forward function above. I wrote the pseudo-code below, but it won't run; kindly advise how to fix it. The main issue is how to incorporate the for loop into the forward function.
    def forward(self, x):
        # shape x for conv 1d op
        x = x.view(-1, 1, self.in_features)
        for i in range(n_layers):
            layers.append(self.conv1(x))
            layers.append(F.rrelu(x))
            layers.append(F.max_pool1d(x, 64, 64))
        x = x.view(-1, self.n_conv)
        x = self.dp(x)
        x = self.fc3(x)
        # x = F.sigmoid(x)
        x = F.log_softmax(x, dim=1)
        return x
There are a bunch of errors that make it hard to understand what you intended to do:

- you build a nn.Sequential model in the __init__ and never use it (and a return instruction in __init__??);
- the in_channels of your convolutions is always 1; the out_features of one iteration should be the in_features of the next iteration;
- your forward function appends tensors to a layers list (which you did not declare, btw) and then never uses this list;
- in forward, you reshape your input with x = x.view(-1, 1, self.in_features), but at that point in_features does not match at all the number of input channels of the first convolution layer.

Long story short: correct all the above errors, and then something like the sketch below should work.
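A minimal sketch of such a corrected model, with the loop confined to __init__ and the tuned layers registered as a single nn.Sequential. The classification head (n_classes, the fixed head dropout, the suggest_float call, and the length bookkeeping feeding fc3) is an illustrative assumption, not the answer's exact code:

    import torch.nn as nn
    import torch.nn.functional as F

    class ConvolutionalNetwork(nn.Module):
        def __init__(self, in_features, trial, n_classes=10):  # n_classes is illustrative
            super().__init__()
            n_layers = trial.suggest_int("n_layers", 1, 5)
            layers = []
            in_channels, length = 1, in_features  # track channels and length across iterations
            for i in range(n_layers):
                out_channels = trial.suggest_int("n_units_l{}".format(i), 16, 160, step=2)
                kernel_size = trial.suggest_int("kernel_size_l{}".format(i), 2, 7)
                p = trial.suggest_float("dropout_l{}".format(i), 0.0, 1.0)
                layers += [
                    nn.Conv1d(in_channels, out_channels, kernel_size, 1),
                    nn.RReLU(),
                    nn.BatchNorm1d(out_channels),
                    nn.Dropout(p),
                ]
                in_channels = out_channels         # this layer's output feeds the next
                length = length - kernel_size + 1  # Conv1d, stride 1, no padding
            layers += [nn.Conv1d(in_channels, 16, kernel_size, 1), nn.RReLU()]
            length = length - kernel_size + 1      # in_features must be large enough to stay > 0
            self.conv = nn.Sequential(*layers)     # register the whole stack as a submodule
            self.dp = nn.Dropout(0.5)              # illustrative fixed head dropout
            self.fc3 = nn.Linear(16 * length, n_classes)

        def forward(self, x):
            x = x.view(x.size(0), 1, -1)  # (batch, channels=1, length)
            x = self.conv(x)              # one call runs the entire tuned stack
            x = x.flatten(1)
            x = self.dp(x)
            x = self.fc3(x)
            return F.log_softmax(x, dim=1)

The key point is that the per-trial loop lives only in __init__: once the layers are collected into self.conv, the forward pass needs no loop at all, just a single self.conv(x) call.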