DCGAN, generator model, conv_transpose2d(): argument 'output_padding' (position 6) must be tuple of ints, not tuple


I tried to implement a DCGAN generator with the following code:

import torch
import torch.nn as nn

nz = 100    # size of the latent vector (same as the tutorial)
ngf = 64    # base number of generator feature maps (same as the tutorial)

class Generator(nn.Module):
    """
    Input : `(batch_size, 100)`

    Output: `(batch_size, 3, 64, 64)`

    A generator that takes noise as input and generates images.

    loss: Binary cross entropy loss
    """
    def __init__(self):
        super().__init__()
        # with ngf = 64 this gives [512, 256, 128, 64]
        layer_dims = [ngf*8, ngf*4, ngf*2, ngf]
        self.layers = nn.ModuleList([
            self.upscale_block(in_f, out_f, kernel_size=4, stride=2, padding=1)
            for in_f, out_f in zip(layer_dims[:-1], layer_dims[1:])
        ])

        self.gen = nn.Sequential(
            self.input_layer(nz, ngf*8, use_bias=False),  # (nz) x 1 x 1 -> (ngf*8) x 4 x 4
            *self.layers,                                 # -> (ngf) x 32 x 32
            self.output_layer(ngf, 3, use_bias=False),    # -> 3 x 64 x 64
        )


    def forward(self, x):
        print("Generator input: ", x.shape)
        # x = torch.tensor(self.dense(x, 4*4*1024, use_bias=False), dtype=torch.float32)
        # x = x.view(-1, 1024, 4, 4)
        # x = self.gen(x)
        return self.gen(x)


    def upscale_block(self, in_channels, out_channels, kernel_size, stride, padding):
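        # kernel_size=4, stride=2, padding=1 doubles the spatial resolution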
        return nn.Sequential(
            nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(True)
        )
    
    def input_layer(self, in_f, out_f, kernel_size=4, stride=1, padding=0, use_bias=False):
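        # projects the (nz) x 1 x 1 latent tensor up to (ngf*8) x 4 x 4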
        return nn.Sequential(
            nn.ConvTranspose2d(in_f, out_f, kernel_size, stride, padding, bias=use_bias),
            nn.BatchNorm2d(out_f),
            nn.ReLU(True),
        )
    

    def output_layer(self, in_f, out_f, use_bias=True):
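        # final upsampling from (ngf) x 32 x 32 to 3 x 64 x 64, squashed by tanh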
        return nn.Sequential(
            nn.ConvTranspose2d(in_f, out_f, 4, 2, 1, use_bias),
            nn.Tanh()
        )

following the code from the official PyTorch DCGAN tutorial, renamed to Generator_pytorch in my notebook so it doesn't clash with my own Generator:

# Generator Code

class Generator_pytorch(nn.Module):
    def __init__(self, ngpu=1):   # ngpu defaults to 1 so the class can be built without arguments
        super().__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # input is Z, going into a convolution
            nn.ConvTranspose2d( nz, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            # state size. (ngf*8) x 4 x 4
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            # state size. (ngf*4) x 8 x 8
            nn.ConvTranspose2d( ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            # state size. (ngf*2) x 16 x 16
            nn.ConvTranspose2d( ngf * 2, ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            # state size. (ngf) x 32 x 32
            nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False),
            nn.Tanh()
            # state size. (nc) x 64 x 64
        )

    def forward(self, input):
        return self.main(input)

But when I run the following code:

z = torch.randn(64, nz, 1, 1)       # a batch of 64 latent vectors
gen_model = Generator()
gen_model_pytorch = Generator_pytorch()
print(gen_model_pytorch(z).shape)   # tutorial model: works
print(gen_model(z).shape)           # my model: raises the error below

I get the following output:


torch.Size([64, 3, 64, 64])
Generator input:  torch.Size([64, 100, 1, 1])
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
/tmp/ipykernel_939/3945186195.py in <module>
      3 gen_model_pytorch = Generator_pytorch()
      4 print(gen_model_pytorch(z).shape)
----> 5 print(gen_model(z).shape)
      6 
      7 # print(gen_model)

/opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
   1108         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1109                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110             return forward_call(*input, **kwargs)
   1111         # Do not call functions when jit is used
   1112         full_backward_hooks, non_full_backward_hooks = [], []

/tmp/ipykernel_939/2092446074.py in forward(self, x)
     28         # x = x.view(-1, 1024, 4, 4)
     29         # x = self.gen(x)
---> 30         return self.gen(x)
     31 
     32 

/opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
   1108         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1109                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110             return forward_call(*input, **kwargs)
   1111         # Do not call functions when jit is used
   1112         full_backward_hooks, non_full_backward_hooks = [], []

/opt/conda/lib/python3.8/site-packages/torch/nn/modules/container.py in forward(self, input)
    139     def forward(self, input):
    140         for module in self:
--> 141             input = module(input)
    142         return input
    143 

/opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
   1108         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1109                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110             return forward_call(*input, **kwargs)
   1111         # Do not call functions when jit is used
   1112         full_backward_hooks, non_full_backward_hooks = [], []

/opt/conda/lib/python3.8/site-packages/torch/nn/modules/container.py in forward(self, input)
    139     def forward(self, input):
    140         for module in self:
--> 141             input = module(input)
    142         return input
    143 

/opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
   1108         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1109                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110             return forward_call(*input, **kwargs)
   1111         # Do not call functions when jit is used
   1112         full_backward_hooks, non_full_backward_hooks = [], []
/opt/conda/lib/python3.8/site-packages/torch/nn/modules/conv.py in forward(self, input, output_size)
    923             input, output_size, self.stride, self.padding, self.kernel_size, self.dilation)  # type: ignore[arg-type]
    924 
--> 925         return F.conv_transpose2d(
    926             input, self.weight, self.bias, self.stride, self.padding,
    927             output_padding, self.groups, self.dilation)

TypeError: conv_transpose2d(): argument 'output_padding' (position 6) must be tuple of ints, not tuple

It works for the tutorial code (obviously), but not for my own Generator class, even though I followed the same architecture as the tutorial. I cross-checked both models layer by layer with print() and they looked the same. Which layer causes this issue?
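
For reference, here is roughly how the layer-by-layer comparison can be done (a minimal sketch with a hypothetical helper leaf_modules, assuming both classes above are defined and instantiated as gen_model and gen_model_pytorch):

def leaf_modules(model):
    # my model nests its blocks inside Sequentials, so only compare leaf layers
    return [m for m in model.modules() if len(list(m.children())) == 0]

for mine, ref in zip(leaf_modules(gen_model), leaf_modules(gen_model_pytorch)):
    print("mine:", mine)
    print("ref :", ref)
    print()

Printing the repr of each leaf side by side makes any difference in the constructor arguments stand out.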

