Object Detection – RuntimeError: stack expects each tensor to be equal size

Question

I created a custom dataset for object detection named ReceiptDataset, as shown below:

import os

import cv2
import numpy as np
import torch
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence

class ReceiptDataset(torch.utils.data.Dataset):
  def __init__(self, train_dir, width, height, labels, transforms=None):
    self.images = os.listdir(train_dir)
    self.width = width
    self.height = height
    self.train_dir = train_dir
    self.labels = labels
    self.transforms = transforms

  def __getitem__(self, idx):
    img_name = self.images[idx]
    img_path = os.path.join(self.train_dir, img_name)

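    # Load the image (OpenCV reads BGR), convert it to RGB, resize it to (width, height), and scale pixels to [0, 1].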
    img = cv2.imread(img_path)
    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB).astype(np.float32)
    img_res = cv2.resize(img_rgb, (self.width, self.height), interpolation=cv2.INTER_AREA)

    img_res /= 255.0

    annot = self.labels[str(img_name)]

    lbls = []
    boxes = []
    target = {}

    ht, wt, _ = img.shape

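    # Convert each [x, y, w, h] annotation to [x_min, y_min, x_max, y_max] and
    # rescale it from the original image size to the resized image size.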
    for item in annot:
      x, y, box_wt, box_ht, lbl = item

      x_min = x
      x_max = x + box_wt
      y_min = y
      y_max = y + box_ht

      x_min_corr = (x_min / wt) * self.width
      x_max_corr = (x_max / wt) * self.width
      y_min_corr = (y_min / ht) * self.height
      y_max_corr = (y_max / ht) * self.height

      boxes.append([x_min_corr, y_min_corr, x_max_corr, y_max_corr])

      lbls.append(classes.index(str(lbl)))

    boxes = torch.as_tensor(boxes, dtype=torch.float32)
    lbls = torch.as_tensor(lbls, dtype=torch.int64)

    area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])

    iscrowd = torch.zeros((boxes.shape[0],), dtype=torch.int64)

    target["boxes"] = boxes
    target["labels"] = lbls
    target["image_id"] = torch.as_tensor(idx)
    target["area"] = area
    target["iscrowd"] = iscrowd

    if self.transforms:
      trans = self.transforms(image=img_res, bboxes=target["boxes"], labels=lbls)
      img_res = trans["image"]
      target["boxes"] = torch.Tensor(trans["bboxes"])

    return img_res, target

  def __len__(self):
    return len(self.images)

I created an instance with:

train_dataset = ReceiptDataset("label-detector/images", width, height, plabels)
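
For reference, the transforms callable is invoked in __getitem__ with image=, bboxes= and labels= keyword arguments, which matches the Albumentations interface. I'm not passing any transforms yet, but if augmentations are added later, a pipeline along the lines of this sketch should fit that call (Albumentations and the "pascal_voc" box format are assumptions here):

import albumentations as A

# Hypothetical augmentation pipeline matching the keyword call in __getitem__.
# Assumes boxes are absolute [x_min, y_min, x_max, y_max] pixels ("pascal_voc").
train_transforms = A.Compose(
    [A.HorizontalFlip(p=0.5)],
    bbox_params=A.BboxParams(format="pascal_voc", label_fields=["labels"]),
)

train_dataset = ReceiptDataset("label-detector/images", width, height, plabels,
                               transforms=train_transforms)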

My training snippet is:

from engine import train_one_epoch, evaluate

for epoch in range(num_epochs):
  train_one_epoch(model, optim, train_loader, device, epoch, print_freq=2)

  lr_scheduler.step()

  evaluate(model, test_loader, device)

But anytime I run the training loop, I get a runtime error:

RuntimeError: stack expects each tensor to be equal size, but got [11,4] at entry 0 and [9,4] at entry 1

There are 17 classes in total, and each image has a minimum of 4 annotations. I noticed the problem seems to be coming from the labels list/tensor in the dataset class: its size varies based on the number of annotated items in an image, but I can't figure out a way to fix this.
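
From what I can tell, the default DataLoader collate tries to torch.stack the per-image box tensors, which only works when every image has the same number of boxes. A minimal reproduction of the error:

import torch

# Two images with different numbers of annotations give box tensors with
# different first dimensions; stacking them fails exactly like in training.
boxes_img0 = torch.zeros(11, 4)
boxes_img1 = torch.zeros(9, 4)

torch.stack([boxes_img0, boxes_img1])
# RuntimeError: stack expects each tensor to be equal size,
# but got [11,4] at entry 0 and [9,4] at entry 1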

Thank you!

Answer 1

Score: 0

I solved it by implementing a custom collate function for the dataloader that returns a batch of my dataset in the form my model needs:

def collate_fn_seq(batch):
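    # Keep images and targets in plain Python lists instead of stacking them,
    # so each image in the batch can have a different number of boxes/labels.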
    images = [item[0] for item in batch]
    targets = [item[1] for item in batch]

    imgs = []
    for image in images:
        img = torch.from_numpy(image).permute(2, 0, 1)
        imgs.append(img)

    boxes = [target["boxes"] for target in targets]

    labels = [target["labels"] for target in targets]

    image_ids = [target["image_id"] for target in targets]
    areas = [target["area"] for target in targets]
    iscrowds = [target["iscrowd"] for target in targets]

    tars = []

    for i in range(len(batch)):
        box = boxes[i]
        label = labels[i]
        image_id = image_ids[i]
        area = areas[i]
        iscrowd = iscrowds[i]

        target = {"boxes": box, "labels": label, "image_id": image_id, "area": area, "iscrowd": iscrowd}
        tars.append(target)

    return imgs, tars

and included it in my dataloaders using:

train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=8, shuffle=True, collate_fn=collate_fn_seq)
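
For completeness, these list-based batches are the format torchvision-style detection models expect in training mode, and consuming them is roughly what train_one_epoch from the reference engine does. A minimal sketch of one training step, assuming model, optim and device are set up as in the question:

# Rough sketch, not the exact reference-engine code: the detection model takes
# a list of image tensors plus a list of target dicts and returns a loss dict.
for imgs, tars in train_loader:
    imgs = [img.to(device) for img in imgs]
    tars = [{k: v.to(device) for k, v in t.items()} for t in tars]

    loss_dict = model(imgs, tars)
    loss = sum(l for l in loss_dict.values())

    optim.zero_grad()
    loss.backward()
    optim.step()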
