CNN PyTorch Error: Input type (torch.cuda.ByteTensor) and weight type (torch.cuda.FloatTensor) should be the same

  48. <details>
  49. <summary>英文:</summary>
  50. I&#39;m receiving the error,
  51. &gt; Input type (torch.cuda.ByteTensor) and weight type (torch.cuda.FloatTensor) should be the same
  52. Following is my code,
```python
# Imports and the CHANNELS / DIM / CLASSES constants come from earlier notebook
# cells not shown in the post (for FashionMNIST they would be 1, 28 and 10).
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from tqdm import tqdm

device = torch.device('cuda:0')

trainData = torchvision.datasets.FashionMNIST('/content/', train=True, transform=None, target_transform=None, download=True)
testData = torchvision.datasets.FashionMNIST('/content/', train=False, transform=None, target_transform=None, download=True)


class Net(nn.Module):
    def __init__(self):
        super().__init__()
        '''
        Network Structure:
        input >
        (1)Conv2D > (2)MaxPool2D >
        (3)Conv2D > (4)MaxPool2D >
        (5)Conv2D > (6)MaxPool2D >
        (7)Linear > (8)LinearOut
        '''
        # Creating the convolutional layers
        self.conv1 = nn.Conv2d(in_channels=CHANNELS, out_channels=32, kernel_size=3)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3)
        self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3)

        self.flatten = None

        # Creating a random dummy sample to get the flattened dimensions
        x = torch.randn(CHANNELS, DIM, DIM).view(-1, CHANNELS, DIM, DIM)
        x = self.convs(x)

        # Creating the linear layers
        self.fc1 = nn.Linear(self.flatten, 512)
        self.fc2 = nn.Linear(512, CLASSES)

    def convs(self, x):
        # Creating the MaxPooling Layers
        x = F.max_pool2d(F.relu(self.conv1(x)), kernel_size=(2, 2))
        x = F.max_pool2d(F.relu(self.conv2(x)), kernel_size=(2, 2))
        x = F.max_pool2d(F.relu(self.conv3(x)), kernel_size=(2, 2))

        if not self.flatten:
            self.flatten = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]
        return x

    # FORWARD PASS
    def forward(self, x):
        x = self.convs(x)
        x = x.view(-1, self.flatten)
        sm = F.relu(self.fc1(x))
        x = F.softmax(self.fc2(sm), dim=1)
        return x, sm


def train(net, training_set, EPOCHS, LEARNING_RATE, BATCH_SIZE):
    x_train, y_train = training_set
    x_train, y_train = x_train.to(device), y_train.to(device)

    optimizer = optim.Adam(net.parameters(), lr=LEARNING_RATE)
    loss_func = nn.MSELoss()
    loss_log = []

    for epoch in range(EPOCHS):
        for i in tqdm(range(0, len(x_train), BATCH_SIZE)):
            x_batch = x_train[i:i+BATCH_SIZE].view(-1, CHANNELS, DIM, DIM).to(device)
            y_batch = y_train[i:i+BATCH_SIZE].to(device)

            net.zero_grad()
            output, sm = net(x_batch)
            loss = loss_func(output, y_batch.float())
            loss.backward()
            optimizer.step()

        loss_log.append(loss)
        # print(f"Epoch : {epoch} || Loss : {loss}")

    return loss_log


train_set = (trainData.train_data, trainData.train_labels)
test_set = (testData.test_data, testData.test_labels)

EPOCHS = 5
LEARNING_RATE = 0.001
BATCH_SIZE = 32

net = Net().to(device)

loss_log = train(net, train_set, EPOCHS, LEARNING_RATE, BATCH_SIZE)
```
**And this is the error that I'm getting:**

```
RuntimeError                              Traceback (most recent call last)
<ipython-input-8-0db1a1b4e37d> in <module>()
      5 net = Net().to(device)
      6 
----> 7 loss_log = train(net, train_set, EPOCHS, LEARNING_RATE, BATCH_SIZE)

6 frames
<ipython-input-6-7de4a78e3736> in train(net, training_set, EPOCHS, LEARNING_RATE, BATCH_SIZE)
     13 
     14             net.zero_grad()
---> 15             output, sm = net(x_batch)
     16             loss = loss_func(output, y_batch.float())
     17             loss.backward()

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    539             result = self._slow_forward(*input, **kwargs)
    540         else:
--> 541             result = self.forward(*input, **kwargs)
    542         for hook in self._forward_hooks.values():
    543             hook_result = hook(self, input, result)

<ipython-input-5-4fddc427892a> in forward(self, x)
     41     # FORWARD PASS
     42     def forward(self, x):
---> 43         x = self.convs(x)
     44         x = x.view(-1, self.flatten)
     45         sm = F.relu(self.fc1(x))

<ipython-input-5-4fddc427892a> in convs(self, x)
     31 
     32         # Creating the MaxPooling Layers
---> 33         x = F.max_pool2d(F.relu(self.conv1(x)), kernel_size=(2, 2))
     34         x = F.max_pool2d(F.relu(self.conv2(x)), kernel_size=(2, 2))
     35         x = F.max_pool2d(F.relu(self.conv3(x)), kernel_size=(2, 2))

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    539             result = self._slow_forward(*input, **kwargs)
    540         else:
--> 541             result = self.forward(*input, **kwargs)
    542         for hook in self._forward_hooks.values():
    543             hook_result = hook(self, input, result)

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/conv.py in forward(self, input)
    343 
    344     def forward(self, input):
--> 345         return self.conv2d_forward(input, self.weight)
    346 
    347 class Conv3d(_ConvNd):

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/conv.py in conv2d_forward(self, input, weight)
    340                         _pair(0), self.dilation, self.groups)
    341         return F.conv2d(input, weight, self.bias, self.stride,
--> 342                         self.padding, self.dilation, self.groups)
    343 
    344     def forward(self, input):

RuntimeError: Input type (torch.cuda.ByteTensor) and weight type (torch.cuda.FloatTensor) should be the same
```
I double-checked that both my neural net and my inputs are on the GPU. I'm still getting this error and I don't understand why!

Somebody, please help me get out of this error.
# Answer 1

**Score**: 23
  173. <details>
  174. <summary>英文:</summary>
Cast your input `x_batch` to float. Use `x_batch = x_batch.float()` before you pass it through your model.
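A minimal sketch of that fix inside the training loop from the question (the raw `train_data` tensors of FashionMNIST are `uint8`, which is why each batch reaches `conv1` as a `torch.cuda.ByteTensor`; the division by 255 is an optional extra step that scales the pixels into `[0, 1]`):

```python
# Inside train(), before the forward pass: cast the uint8 batch to float32.
x_batch = x_train[i:i+BATCH_SIZE].view(-1, CHANNELS, DIM, DIM).to(device)
x_batch = x_batch.float() / 255.0   # ByteTensor -> FloatTensor (optional [0, 1] scaling)
y_batch = y_train[i:i+BATCH_SIZE].to(device)

net.zero_grad()
output, sm = net(x_batch)           # dtype now matches the conv weights
```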
  176. </details>

huangapple
  • Published on 2020-01-04 01:15:36
  • Please retain this link when reposting: https://go.coder-hub.com/59582663.html