PyTorch DCGAN example doesn't work with different image sizes


I'm trying to use this code as a starting point for building GANs from my own data set of 224x224 images. The image size is set through a command-line argument (e.g. --imageSize 224); the example's default is 64.

Here is the code snippet with the model:

import torch
import torch.nn as nn

# nz, ngf, ndf, and nc are set from the command-line arguments earlier in main.py

class _netG(nn.Module):
    def __init__(self, ngpu):
        super(_netG, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # input is Z, going into a convolution
            nn.ConvTranspose2d(     nz, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            # state size. (ngf*8) x 4 x 4
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            # state size. (ngf*4) x 8 x 8
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            # state size. (ngf*2) x 16 x 16
            nn.ConvTranspose2d(ngf * 2,     ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            # state size. (ngf) x 32 x 32
            nn.ConvTranspose2d(    ngf,      nc, 4, 2, 1, bias=False),
            nn.Tanh()
            # state size. (nc) x 64 x 64
        )

    def forward(self, input):
        if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
            output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
        else:
            output = self.main(input)
        return output
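
For reference, this is how I sanity-checked the generator's output size (a minimal sketch, run inside main.py where nz, ngf, and nc are already defined; I'm assuming the example's defaults nz=100, ngf=64, nc=3):

netG = _netG(ngpu=1)
z = torch.randn(1, nz, 1, 1)   # a single latent vector, shape (N, nz, 1, 1)
print(netG(z).size())          # torch.Size([1, 3, 64, 64]) -- always 64x64, regardless of --imageSize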

class _netD(nn.Module):
    def __init__(self, ngpu):
        super(_netD, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # input is (nc) x 64 x 64
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 32 x 32
            nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 16 x 16
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 8 x 8
            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 4 x 4
            nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
            nn.Sigmoid()
        )

    def forward(self, input):
        if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
            output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
        else:
            output = self.main(input)

        return output.view(-1, 1).squeeze(1)
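
The same check on the discriminator is where the numbers stop making sense to me (again a sketch run inside main.py, assuming the defaults ndf=64, nc=3):

netD = _netD(ngpu=1)
print(netD(torch.randn(1, nc, 64, 64)).size())    # torch.Size([1])   -> one score per image
print(netD(torch.randn(1, nc, 224, 224)).size())  # torch.Size([121]) -> an 11x11 map, flattened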

If I change any of the default arguments (e.g. --imageSize 224) I get the following error:

Traceback (most recent call last):
  File "main.py", line 209, in <module>
    errD_real = criterion(output, label)
  File "/opt/python/lib/python3.6/site-packages/torch/nn/modules/module.py", line 210, in __call__
    result = self.forward(*input, **kwargs)
  File "/opt/python/lib/python3.6/site-packages/torch/nn/modules/loss.py", line 36, in forward
    return backend_fn(self.size_average, weight=self.weight)(input, target)
  File "/opt/python/lib/python3.6/site-packages/torch/nn/_functions/thnn/loss.py", line 22, in forward
    assert input.nelement() == target.nelement()
AssertionError
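
My best guess is that the discriminator's output and the label tensor end up with different numbers of elements at 224x224. Here is the layer-by-layer size arithmetic as I currently understand it (the conv_out helper below is just mine, for illustration):

# Conv2d output size: out = floor((in + 2*p - k) / s) + 1
def conv_out(size, k=4, s=2, p=1):
    return (size + 2 * p - k) // s + 1

size = 224
for _ in range(4):                     # the four stride-2 convs in _netD
    size = conv_out(size)              # 224 -> 112 -> 56 -> 28 -> 14
print(size)                            # 14
print(conv_out(size, k=4, s=1, p=0))   # 11: the final conv emits 1 x 11 x 11, not 1 x 1 x 1

So at 224x224 the discriminator would return 121 numbers per image while criterion gets one label per image, which would explain the assert, but I don't know the right way to adapt the architecture.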

I was told at first to try other models, but I also thought it would be good to understand and work around this issue. I've attempted the solutions outlined in this GitHub thread, but it still doesn't work. I'm still learning my way around PyTorch, so the network architectures printed before the error above don't give me much intuition beyond the guess I sketched. I appreciate any pointers you can give!
