I'm trying to develop an image segmentation model. In the code below I keep hitting RuntimeError: Input type (torch.cuda.ByteTensor) and weight type (torch.cuda.FloatTensor) should be the same. I'm not sure why, as I've tried to load both my data and my UNet model onto the GPU using .cuda() (although not the skorch model, since I'm not sure how to do that). I'm using modAL, an active learning library that wraps skorch.
from pathlib import Path

import cv2
import numpy as np
import torch
from torch import nn
from torch import Tensor
from torch.optim import SGD
from torch.utils.data import DataLoader
from torch.utils.data import Dataset

from skorch.net import NeuralNet
from skorch.helper import predefined_split

from modAL.models import ActiveLearner
from modAL.uncertainty import classifier_uncertainty, classifier_margin, uncertainty_sampling
from modAL.utils.combination import make_linear_combination, make_product
from modAL.utils.selection import multi_argmax

from model import UNet
# Map-style dataset
class ImagesDataset(Dataset):
    """Constructs dataset of satellite images + masks"""
    def __init__(self, image_paths):
        super().__init__()
        self.image_paths = image_paths

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        print("idx:", idx)
        sample_dir = self.image_paths[idx]
        img_path = sample_dir + "/images/" + Path(sample_dir).name + ".png"
        mask_path = sample_dir + "/mask.png"
        img, mask = cv2.imread(img_path), cv2.imread(mask_path)
        print("shape of img", img.shape)
        return img, mask
# turn data into dataset
train_ds = ImagesDataset(train_dirs)
val_ds = ImagesDataset(valid_dirs)
train_loader = torch.utils.data.DataLoader(train_ds, batch_size=3, shuffle=True, pin_memory=True)
val_loader = torch.utils.data.DataLoader(val_ds, batch_size=1, shuffle=True, pin_memory=True)
# make sure data loaded in cuda for train, validation
for i, (tr, val) in enumerate(train_loader):
    tr, val = tr.cuda(), val.cuda()
for i, (tr2, val2) in enumerate(val_loader):
    tr2, val2 = tr2.cuda(), val2.cuda()
X, y = next(iter(train_loader))
X_train = np.array(X.reshape(3,3,1024,1024))
y_train = np.array(y.reshape(3,3,1024,1024))
X2, y2 = next(iter(val_loader))
X_test = np.array(X2.reshape(1,3,1024,1024))
y_test = np.array(y2.reshape(1,3,1024,1024))
module = UNet(pretrained=True)
if torch.cuda.is_available():
    module = module.cuda()
# create the classifier
net = NeuralNet(
    module,
    criterion=torch.nn.NLLLoss,
    batch_size=32,
    max_epochs=20,
    optimizer=SGD,
    optimizer__momentum=0.9,
    iterator_train__shuffle=True,
    iterator_train__num_workers=4,
    iterator_valid__shuffle=False,
    iterator_valid__num_workers=4,
    train_split=predefined_split(val_ds),
    device='cuda',
)
# assemble initial data
n_initial = 1
initial_idx = np.random.choice(range(len(X_train)), size=n_initial, replace=False)
X_initial = X_train[initial_idx]
y_initial = y_train[initial_idx]
# generate the pool, remove the initial data from the training dataset
X_pool = np.delete(X_train, initial_idx, axis=0)
y_pool = np.delete(y_train, initial_idx, axis=0)
# train the ActiveLearner
# shape of the 4D matrix is (batch, channel, width, height)
learner = ActiveLearner(
    estimator=net,
    X_training=X_initial, y_training=y_initial,
)
The full error trace is:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-83-0af6007b6b72> in <module>
8 learner = ActiveLearner(
9 estimator= net,
---> 10 X_training=X_initial, y_training=y_initial,
11 # X_training=X_initial, y_training=y_initial,
12 )
~/.local/lib/python3.7/site-packages/modAL/models/learners.py in __init__(self, estimator, query_strategy, X_training, y_training, bootstrap_init, on_transformed, **fit_kwargs)
80 ) -> None:
81 super().__init__(estimator, query_strategy,
---> 82 X_training, y_training, bootstrap_init, on_transformed, **fit_kwargs)
83
84 def teach(self, X: modALinput, y: modALinput, bootstrap: bool = False, only_new: bool = False, **fit_kwargs) -> None:
~/.local/lib/python3.7/site-packages/modAL/models/base.py in __init__(self, estimator, query_strategy, X_training, y_training, bootstrap_init, on_transformed, force_all_finite, **fit_kwargs)
70 self.y_training = y_training
71 if X_training is not None:
---> 72 self._fit_to_known(bootstrap=bootstrap_init, **fit_kwargs)
73 self.Xt_training = self.transform_without_estimating(self.X_training) if self.on_transformed else None
74
~/.local/lib/python3.7/site-packages/modAL/models/base.py in _fit_to_known(self, bootstrap, **fit_kwargs)
160 """
161 if not bootstrap:
--> 162 self.estimator.fit(self.X_training, self.y_training, **fit_kwargs)
163 else:
164 n_instances = self.X_training.shape[0]
~/.local/lib/python3.7/site-packages/skorch/net.py in fit(self, X, y, **fit_params)
901 self.initialize()
902
--> 903 self.partial_fit(X, y, **fit_params)
904 return self
905
~/.local/lib/python3.7/site-packages/skorch/net.py in partial_fit(self, X, y, classes, **fit_params)
860 self.notify('on_train_begin', X=X, y=y)
861 try:
--> 862 self.fit_loop(X, y, **fit_params)
863 except KeyboardInterrupt:
864 pass
~/.local/lib/python3.7/site-packages/skorch/net.py in fit_loop(self, X, y, epochs, **fit_params)
774
775 self.run_single_epoch(dataset_train, training=True, prefix="train",
--> 776 step_fn=self.train_step, **fit_params)
777
778 if dataset_valid is not None:
~/.local/lib/python3.7/site-packages/skorch/net.py in run_single_epoch(self, dataset, training, prefix, step_fn, **fit_params)
810 yi_res = yi if not is_placeholder_y else None
811 self.notify("on_batch_begin", X=Xi, y=yi_res, training=training)
--> 812 step = step_fn(Xi, yi, **fit_params)
813 self.history.record_batch(prefix + "_loss", step["loss"].item())
814 self.history.record_batch(prefix + "_batch_size", get_len(Xi))
~/.local/lib/python3.7/site-packages/skorch/net.py in train_step(self, Xi, yi, **fit_params)
707 return step['loss']
708
--> 709 self.optimizer_.step(step_fn)
710 return step_accumulator.get_step()
711
~/.local/lib/python3.7/site-packages/torch/autograd/grad_mode.py in decorate_context(*args, **kwargs)
24 def decorate_context(*args, **kwargs):
25 with self.__class__():
---> 26 return func(*args, **kwargs)
27 return cast(F, decorate_context)
28
~/.local/lib/python3.7/site-packages/torch/optim/sgd.py in step(self, closure)
84 if closure is not None:
85 with torch.enable_grad():
---> 86 loss = closure()
87
88 for group in self.param_groups:
~/.local/lib/python3.7/site-packages/skorch/net.py in step_fn()
703 def step_fn():
704 self.optimizer_.zero_grad()
--> 705 step = self.train_step_single(Xi, yi, **fit_params)
706 step_accumulator.store_step(step)
707 return step['loss']
~/.local/lib/python3.7/site-packages/skorch/net.py in train_step_single(self, Xi, yi, **fit_params)
643 """
644 self.module_.train()
--> 645 y_pred = self.infer(Xi, **fit_params)
646 loss = self.get_loss(y_pred, yi, X=Xi, training=True)
647 loss.backward()
~/.local/lib/python3.7/site-packages/skorch/net.py in infer(self, x, **fit_params)
1046 x_dict = self._merge_x_and_fit_params(x, fit_params)
1047 return self.module_(**x_dict)
-> 1048 return self.module_(x, **fit_params)
1049
1050 def _get_predict_nonlinearity(self):
~/.local/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/al/model.py in forward(self, x)
51
52 def forward(self, x):
---> 53 conv1 = self.conv1(x)
54 conv2 = self.conv2(conv1)
55 conv3 = self.conv3(conv2)
~/.local/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/.local/lib/python3.7/site-packages/torch/nn/modules/container.py in forward(self, input)
115 def forward(self, input):
116 for module in self:
--> 117 input = module(input)
118 return input
119
~/.local/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/.local/lib/python3.7/site-packages/torch/nn/modules/conv.py in forward(self, input)
421
422 def forward(self, input: Tensor) -> Tensor:
--> 423 return self._conv_forward(input, self.weight)
424
425 class Conv3d(_ConvNd):
~/.local/lib/python3.7/site-packages/torch/nn/modules/conv.py in _conv_forward(self, input, weight)
418 _pair(0), self.dilation, self.groups)
419 return F.conv2d(input, weight, self.bias, self.stride,
--> 420 self.padding, self.dilation, self.groups)
421
422 def forward(self, input: Tensor) -> Tensor:
RuntimeError: Input type (torch.cuda.ByteTensor) and weight type (torch.cuda.FloatTensor) should be the same
If anyone could help, that would be so appreciated! I've been really stuck despite searching all over: casting my UNet model to float has not helped, and I think I've called .cuda() everywhere I'm supposed to.
Specific things I've tried:
- loading entries of my DataLoader to CUDA, per "RuntimeError: Input type (torch.FloatTensor) and weight type (torch.cuda.FloatTensor) should be the same"
- adding pin_memory to my DataLoader
- loading my skorch NeuralNet to CUDA as suggested in "Pytorch, INPUT (normal tensor) and WEIGHT (cuda tensor) mismatch" (which didn't work, because that isn't a function in skorch)
- casting my data to float (https://discuss.pytorch.org/t/input-type-torch-cuda-doubletensor-and-weight-type-torch-cuda-floattensor-should-be-the-same/22704)
cv2.imread gives you np.uint8 data, which gets converted to PyTorch's byte type. The byte type cannot be used with the float type (which is most probably what your model's weights use). You need to convert the byte data to float (and to a Tensor) by modifying the dataset.
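As a minimal sketch (assuming the image/mask layout from your question, and assuming dividing by 255 is the right normalization for your images), the conversion can happen directly in __getitem__:

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        sample_dir = self.image_paths[idx]
        img_path = sample_dir + "/images/" + Path(sample_dir).name + ".png"
        mask_path = sample_dir + "/mask.png"
        # cv2.imread returns uint8 arrays of shape (H, W, C)
        img, mask = cv2.imread(img_path), cv2.imread(mask_path)
        # convert to float32 tensors of shape (C, H, W) so the input dtype
        # matches the model's float weights; /255.0 is an assumed normalization
        img = torch.from_numpy(img).permute(2, 0, 1).float() / 255.0
        mask = torch.from_numpy(mask).permute(2, 0, 1).float()
        return img, mask

One caveat: with NLLLoss as the criterion, the target is usually expected to be a LongTensor of class indices rather than a float mask, so depending on how your masks are encoded you may instead want something like mask = torch.from_numpy(mask[:, :, 0]).long().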