CNN Pytorch Error : Input type (torch.cuda.ByteTensor) and weight type (torch.cuda.FloatTensor) should be the same

# Question

I'm receiving the following error:

> Input type (torch.cuda.ByteTensor) and weight type (torch.cuda.FloatTensor) should be the same

Following is my code:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F
    import torch.optim as optim
    import torchvision
    from tqdm import tqdm

    device = torch.device('cuda:0')

    # FashionMNIST images are 28x28 grayscale, with 10 classes
    CHANNELS = 1
    DIM      = 28
    CLASSES  = 10

    trainData = torchvision.datasets.FashionMNIST('/content/', train=True, transform=None, target_transform=None, download=True)
    testData  = torchvision.datasets.FashionMNIST('/content/', train=False, transform=None, target_transform=None, download=True)

    class Net(nn.Module):
      def __init__(self):
        super().__init__()

        '''
        Network structure:

        input >
        (1)Conv2D > (2)MaxPool2D >
        (3)Conv2D > (4)MaxPool2D >
        (5)Conv2D > (6)MaxPool2D >
        (7)Linear > (8)LinearOut
        '''

        # Creating the convolutional layers
        self.conv1 = nn.Conv2d(in_channels=CHANNELS, out_channels=32, kernel_size=3)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3)
        self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3)

        self.flatten = None
        # Creating a random dummy sample to get the flattened dimensions
        x = torch.randn(CHANNELS, DIM, DIM).view(-1, CHANNELS, DIM, DIM)
        x = self.convs(x)

        # Creating the linear layers
        self.fc1 = nn.Linear(self.flatten, 512)
        self.fc2 = nn.Linear(512, CLASSES)

      def convs(self, x):

        # Conv > ReLU > MaxPool blocks
        x = F.max_pool2d(F.relu(self.conv1(x)), kernel_size=(2, 2))
        x = F.max_pool2d(F.relu(self.conv2(x)), kernel_size=(2, 2))
        x = F.max_pool2d(F.relu(self.conv3(x)), kernel_size=(2, 2))

        # Record the flattened feature size on the first pass
        if not self.flatten:
          self.flatten = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]
        return x

      # FORWARD PASS
      def forward(self, x):
        x = self.convs(x)
        x = x.view(-1, self.flatten)
        sm = F.relu(self.fc1(x))
        x = F.softmax(self.fc2(sm), dim=1)
        return x, sm

    def train(net, training_set, EPOCHS, LEARNING_RATE, BATCH_SIZE):
      x_train, y_train = training_set
      x_train, y_train = x_train.to(device), y_train.to(device)
      optimizer = optim.Adam(net.parameters(), lr=LEARNING_RATE)
      loss_func = nn.MSELoss()
      loss_log  = []

      for epoch in range(EPOCHS):
        for i in tqdm(range(0, len(x_train), BATCH_SIZE)):
          x_batch = x_train[i:i+BATCH_SIZE].view(-1, CHANNELS, DIM, DIM).to(device)
          y_batch = y_train[i:i+BATCH_SIZE].to(device)

          net.zero_grad()
          output, sm = net(x_batch)
          loss = loss_func(output, y_batch.float())
          loss.backward()
          optimizer.step()
        loss_log.append(loss)
        # print(f"Epoch : {epoch} || Loss : {loss}")

      return loss_log

    train_set = (trainData.train_data, trainData.train_labels)
    test_set  = (testData.test_data, testData.test_labels)

    EPOCHS        = 5
    LEARNING_RATE = 0.001
    BATCH_SIZE    = 32

    net = Net().to(device)

    loss_log = train(net, train_set, EPOCHS, LEARNING_RATE, BATCH_SIZE)

**And this is the error that I'm getting:**

    RuntimeError                              Traceback (most recent call last)
    <ipython-input-8-0db1a1b4e37d> in <module>()
          5 net = Net().to(device)
          6 
    ----> 7 loss_log = train(net, train_set, EPOCHS, LEARNING_RATE, BATCH_SIZE)

    6 frames
    <ipython-input-6-7de4a78e3736> in train(net, training_set, EPOCHS, LEARNING_RATE, BATCH_SIZE)
         13 
         14         net.zero_grad()
    ---> 15         output, sm = net(x_batch)
         16         loss = loss_func(output, y_batch.float())
         17         loss.backward()

    /usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
        539             result = self._slow_forward(*input, **kwargs)
        540         else:
    --> 541             result = self.forward(*input, **kwargs)
        542         for hook in self._forward_hooks.values():
        543             hook_result = hook(self, input, result)

    <ipython-input-5-4fddc427892a> in forward(self, x)
         41   # FORWARD PASS
         42   def forward(self, x):
    ---> 43     x = self.convs(x)
         44     x = x.view(-1, self.flatten)
         45     sm = F.relu(self.fc1(x))

    <ipython-input-5-4fddc427892a> in convs(self, x)
         31 
         32     # Creating the MaxPooling Layers
    ---> 33     x = F.max_pool2d(F.relu(self.conv1(x)), kernel_size=(2, 2))
         34     x = F.max_pool2d(F.relu(self.conv2(x)), kernel_size=(2, 2))
         35     x = F.max_pool2d(F.relu(self.conv3(x)), kernel_size=(2, 2))

    /usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
        539             result = self._slow_forward(*input, **kwargs)
        540         else:
    --> 541             result = self.forward(*input, **kwargs)
        542         for hook in self._forward_hooks.values():
        543             hook_result = hook(self, input, result)

    /usr/local/lib/python3.6/dist-packages/torch/nn/modules/conv.py in forward(self, input)
        343 
        344     def forward(self, input):
    --> 345         return self.conv2d_forward(input, self.weight)
        346 
        347 class Conv3d(_ConvNd):

    /usr/local/lib/python3.6/dist-packages/torch/nn/modules/conv.py in conv2d_forward(self, input, weight)
        340                             _pair(0), self.dilation, self.groups)
        341         return F.conv2d(input, weight, self.bias, self.stride,
    --> 342                         self.padding, self.dilation, self.groups)
        343 
        344     def forward(self, input):

    RuntimeError: Input type (torch.cuda.ByteTensor) and weight type (torch.cuda.FloatTensor) should be the same

I double-checked that both my neural net and my inputs are on the GPU. I'm still getting this error and I don't understand why!

Could somebody please help me get past this error?


# Answer 1
**Score:** 23

Cast your input `x_batch` to float. Use `x_batch = x_batch.float()` before you pass it through your model.

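For context on why the cast is needed: FashionMNIST stores its raw images as `uint8`, and this code indexes `trainData.train_data` directly (no `ToTensor()` transform), so each batch arrives as a `ByteTensor` and becomes `torch.cuda.ByteTensor` after `.to(device)`, while the convolution weights are `float32`. Below is a minimal sketch of the fix in the training loop above; the `/ 255.0` rescaling is an extra assumption on my part (it normalizes pixel values to [0, 1]) and is not required just to clear the error:

    # Inside the training loop, before the forward pass:
    x_batch = x_train[i:i+BATCH_SIZE].view(-1, CHANNELS, DIM, DIM).to(device)
    # Cast to float32 so the input dtype matches the conv weights.
    # Dividing by 255.0 (optional extra) also rescales pixels to [0, 1].
    x_batch = x_batch.float() / 255.0

    net.zero_grad()
    output, sm = net(x_batch)

Alternatively, constructing the dataset with `transform=torchvision.transforms.ToTensor()` and iterating with a `torch.utils.data.DataLoader` yields float tensors already scaled to [0, 1], so no manual cast is needed.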



Posted by huangapple on 2020-01-04 01:15:36. Please retain this link when reposting: https://go.coder-hub.com/59582663.html