AttributeError: 'str' object has no attribute 'items' when using RawframeDataset with mvit in mmaction2

Question

I am using mmaction2 branch 1.x. I recently migrated from 0.24 and want to use the MViT model. When I train my configuration with RawframeDataset, it stops with the message AttributeError: 'str' object has no attribute 'items' (please see below for the detailed log). Any suggestions? Thank you.
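For reference, the same failure can be reproduced without the distributed launcher by building the train dataset directly from the config. This is only a diagnostic sketch, assuming something_mvit.py sits in the working directory; register_all_modules and DATASETS are the standard mmaction2 1.x entry points:

from mmengine.config import Config
from mmaction.registry import DATASETS
from mmaction.utils import register_all_modules

# Register mmaction2 modules so custom types like 'RawframeDataset' resolve.
register_all_modules()

cfg = Config.fromfile('something_mvit.py')
# Building the train dataset alone raises the same AttributeError as dist_train.sh.
dataset = DATASETS.build(cfg.train_dataloader.dataset)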


Configuration

#something_mvit.py

_base_ = [
    'mmaction2/configs/_base_/models/mvit_small.py', 'mmaction2/configs/_base_/default_runtime.py'
]

repeat_times = 1
num_classes = 10
batch_size = 1

# model settings
model = dict(
    backbone=dict(
        arch='large',
        temporal_size=40,
        spatial_size=312,
        drop_path_rate=0.75,
    ),
    data_preprocessor=dict(
        type='ActionDataPreprocessor',
        mean=[114.75, 114.75, 114.75],
        std=[57.375, 57.375, 57.375],
        blending=dict(
            type='RandomBatchAugment',
            augments=[
                dict(type='MixupBlending', alpha=0.8, num_classes=400),
                dict(type='CutmixBlending', alpha=1, num_classes=400)
            ]),
        format_shape='NCTHW'),
    cls_head=dict(in_channels=1152, num_classes=num_classes),
    test_cfg=dict(max_testing_views=5))

# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'dataset'
data_root_val = 'dataset'
ann_file_train = 'dataset/train_rawdataset.txt'
ann_file_val = 'dataset/val_rawdataset.txt'
ann_file_test = 'dataset/test_rawdataset.txt'

file_client_args = dict(io_backend='disk')
train_pipeline = [
    dict(type='UniformSampleFrames', clip_len=40),
    dict(type='RawFrameDecode', **file_client_args),
    dict(type='Resize', scale=(-1, 256)),
    dict(
        type='PytorchVideoWrapper',
        op='RandAugment',
        magnitude=7,
        num_layers=4),
    dict(type='RandomErasing', erase_prob=0.25, mode='rand'),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='PackActionInputs')
]
val_pipeline = [
    dict(type='UniformSampleFrames', clip_len=40, test_mode=True),
    dict(type='RawFrameDecode', **file_client_args),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='PackActionInputs')
]
test_pipeline = [
    dict(type='UniformSampleFrames', clip_len=40, test_mode=True),
    dict(type='RawFrameDecode', **file_client_args),
    dict(type='Resize', scale=(-1, 224)),
    dict(type='ThreeCrop', crop_size=224),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='PackActionInputs')
]
with_offset = True
filename_tmpl = '{:05}.jpg'
train_dataloader = dict(
    batch_size=batch_size,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=repeat_times,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file_train,
            data_prefix=data_root,
            num_classes=num_classes,
            with_offset=with_offset,
            filename_tmpl=filename_tmpl,
            pipeline=train_pipeline)))
val_dataloader = dict(
    batch_size=batch_size,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type='RepeatDataset',
        times=repeat_times,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file_val,
            data_prefix=data_root_val,
            num_classes=num_classes,
            with_offset=with_offset,
            filename_tmpl=filename_tmpl,
            pipeline=val_pipeline)))
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        num_classes=num_classes,
        with_offset=with_offset,
        filename_tmpl=filename_tmpl,
        pipeline=test_pipeline))
val_evaluator = dict(type='AccMetric')
test_evaluator = val_evaluator

train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=100, val_begin=1, val_interval=3)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

base_lr = 1.6e-3
optim_wrapper = dict(
    type='AmpOptimWrapper',
    optimizer=dict(
        type='AdamW', lr=base_lr, betas=(0.9, 0.999), weight_decay=0.05))

param_scheduler = [
    dict(
        type='LinearLR',
        start_factor=0.1,
        by_epoch=True,
        begin=0,
        end=30,
        convert_to_iter_based=True),
    dict(
        type='CosineAnnealingLR',
        T_max=70,
        eta_min=base_lr / 100,
        by_epoch=True,
        begin=30,
        end=100,
        convert_to_iter_based=True)
]

default_hooks = dict(
    checkpoint=dict(interval=3, max_keep_ckpts=5), logger=dict(interval=10))

# Default setting for scaling LR automatically
#   - `enable` means enable scaling LR automatically
#       or not by default.
#   - `base_batch_size` = (8 GPUs) x (8 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=64)
# runtime settings
# checkpoint_config = dict(interval=5)
work_dir = 'runs/'
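For context, with_offset=True changes the expected layout of the annotation files referenced above: each line should carry an offset field in addition to the frame directory, frame count, and label. A small parsing sketch (the line values are hypothetical; the four-field frame_dir / offset / total_frames / label layout is the assumption here):

# Hypothetical annotation line when with_offset=True:
#   <frame_dir> <offset> <total_frames> <label>
line = 'video_001 1 120 3'
frame_dir, offset, total_frames, label = line.split()

# filename_tmpl='{:05}.jpg' turns a frame index into a file name, e.g.:
first_frame = '{:05}.jpg'.format(int(offset))
print(frame_dir, first_frame, total_frames, label)  # video_001 00001.jpg 120 3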

Log

thomas@RYZEN9:~/proyek$ pyenv exec mmaction2/tools/dist_train.sh something_mvit.py 1
+ CONFIG=something_mvit.py
+ GPUS=1
+ NNODES=1
+ NODE_RANK=0
+ PORT=29500
+ MASTER_ADDR=127.0.0.1
++ dirname /home/thomas/proyek/mmaction2/tools/dist_train.sh
++ dirname /home/thomas/proyek/mmaction2/tools/dist_train.sh
+ PYTHONPATH=/home/thomas/proyek/mmaction2/tools/..:
+ python -m torch.distributed.launch --nnodes=1 --node_rank=0 --master_addr=127.0.0.1 --nproc_per_node=1 --master_port=29500 /home/thomas/proyek/mmaction2/tools/train.py something_mvit.py --launcher pytorch
/home/thomas/.pyenv/versions/miniconda3-3.10-22.11.1-1/lib/python3.10/site-packages/torch/distributed/launch.py:180: FutureWarning: The module torch.distributed.launch is deprecated
and will be removed in future. Use torchrun.
Note that --use_env is set by default in torchrun.
If your script expects `--local_rank` argument to be set, please
change it to read from `os.environ['LOCAL_RANK']` instead. See 
https://pytorch.org/docs/stable/distributed.html#launch-utility for 
further instructions
warnings.warn(
/home/thomas/.pyenv/versions/miniconda3-3.10-22.11.1-1/lib/python3.10/site-packages/mmengine/utils/dl_utils/setup_env.py:46: UserWarning: Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
warnings.warn(
/home/thomas/.pyenv/versions/miniconda3-3.10-22.11.1-1/lib/python3.10/site-packages/mmengine/utils/dl_utils/setup_env.py:56: UserWarning: Setting MKL_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
warnings.warn(
02/16 09:14:50 - mmengine - WARNING - The "log_processor" registry in mmaction did not set import location. Fallback to call `mmaction.utils.register_all_modules` instead.
02/16 09:14:51 - mmengine - INFO - 
------------------------------------------------------------
System environment:
sys.platform: linux
Python: 3.10.8 (main, Nov 24 2022, 14:13:03) [GCC 11.2.0]
CUDA available: True
numpy_random_seed: 209724671
GPU 0: NVIDIA GeForce RTX 3070
CUDA_HOME: None
GCC: gcc (Ubuntu 11.3.0-1ubuntu1~22.04) 11.3.0
PyTorch: 1.13.1+cu117
PyTorch compiling details: PyTorch built with:
- GCC 9.3
- C++ Version: 201402
- Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications
- Intel(R) MKL-DNN v2.6.0 (Git Hash 52b5f107dd9cf10910aaa19cb47f3abf9b349815)
- OpenMP 201511 (a.k.a. OpenMP 4.5)
- LAPACK is enabled (usually provided by MKL)
- NNPACK is enabled
- CPU capability usage: AVX2
- CUDA Runtime 11.7
- NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86
- CuDNN 8.5
- Magma 2.6.1
- Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.7, CUDNN_VERSION=8.5.0, CXX_COMPILER=/opt/rh/devtoolset-9/root/usr/bin/c++, CXX_FLAGS= -fabi-version=11 -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Werror=non-virtual-dtor -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wunused-local-typedefs -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Werror=cast-function-type -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.13.1, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, 
TorchVision: 0.14.1+cu117
OpenCV: 4.7.0
MMEngine: 0.5.0
Runtime environment:
cudnn_benchmark: False
mp_cfg: {'mp_start_method': 'fork', 'opencv_num_threads': 0}
dist_cfg: {'backend': 'nccl'}
seed: None
diff_rank_seed: False
deterministic: False
Distributed launcher: pytorch
Distributed training: True
GPU number: 1
------------------------------------------------------------
02/16 09:14:51 - mmengine - INFO - Config:
model = dict(
type='Recognizer3D',
backbone=dict(
type='MViT',
arch='large',
drop_path_rate=0.75,
temporal_size=40,
spatial_size=312),
data_preprocessor=dict(
type='ActionDataPreprocessor',
mean=[114.75, 114.75, 114.75],
std=[57.375, 57.375, 57.375],
format_shape='NCTHW',
blending=dict(
type='RandomBatchAugment',
augments=[
dict(type='MixupBlending', alpha=0.8, num_classes=400),
dict(type='CutmixBlending', alpha=1, num_classes=400)
])),
cls_head=dict(
type='MViTHead',
in_channels=1152,
num_classes=10,
label_smooth_eps=0.1,
average_clips='prob'),
test_cfg=dict(max_testing_views=5))
default_scope = 'mmaction'
default_hooks = dict(
runtime_info=dict(type='RuntimeInfoHook'),
timer=dict(type='IterTimerHook'),
logger=dict(type='LoggerHook', interval=10, ignore_last=False),
param_scheduler=dict(type='ParamSchedulerHook'),
checkpoint=dict(
type='CheckpointHook', interval=3, save_best='auto', max_keep_ckpts=5),
sampler_seed=dict(type='DistSamplerSeedHook'),
sync_buffers=dict(type='SyncBuffersHook'))
env_cfg = dict(
cudnn_benchmark=False,
mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
dist_cfg=dict(backend='nccl'))
log_processor = dict(type='LogProcessor', window_size=20, by_epoch=True)
vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
type='ActionVisualizer', vis_backends=[dict(type='LocalVisBackend')])
log_level = 'INFO'
load_from = None
resume = False
repeat_times = 1
num_classes = 10
batch_size = 1
dataset_type = 'RawframeDataset'
data_root = 'dataset'
data_root_val = 'dataset'
ann_file_train = 'dataset/train_rawdataset.txt'
ann_file_val = 'dataset/val_rawdataset.txt'
ann_file_test = 'dataset/test_rawdataset.txt'
file_client_args = dict(io_backend='disk')
train_pipeline = [
dict(type='UniformSampleFrames', clip_len=40),
dict(type='RawFrameDecode', io_backend='disk'),
dict(type='Resize', scale=(-1, 256)),
dict(
type='PytorchVideoWrapper',
op='RandAugment',
magnitude=7,
num_layers=4),
dict(type='RandomErasing', erase_prob=0.25, mode='rand'),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='PackActionInputs')
]
val_pipeline = [
dict(type='UniformSampleFrames', clip_len=40, test_mode=True),
dict(type='RawFrameDecode', io_backend='disk'),
dict(type='Resize', scale=(-1, 256)),
dict(type='CenterCrop', crop_size=224),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='PackActionInputs')
]
test_pipeline = [
dict(type='UniformSampleFrames', clip_len=40, test_mode=True),
dict(type='RawFrameDecode', io_backend='disk'),
dict(type='Resize', scale=(-1, 224)),
dict(type='ThreeCrop', crop_size=224),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='PackActionInputs')
]
with_offset = True
filename_tmpl = '{:05}.jpg'
train_dataloader = dict(
batch_size=1,
num_workers=1,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
type='RepeatDataset',
times=1,
dataset=dict(
type='RawframeDataset',
ann_file='dataset/train_rawdataset.txt',
data_prefix='dataset',
num_classes=10,
with_offset=True,
filename_tmpl='{:05}.jpg',
pipeline=[
dict(type='UniformSampleFrames', clip_len=40),
dict(type='RawFrameDecode', io_backend='disk'),
dict(type='Resize', scale=(-1, 256)),
dict(
type='PytorchVideoWrapper',
op='RandAugment',
magnitude=7,
num_layers=4),
dict(type='RandomErasing', erase_prob=0.25, mode='rand'),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='PackActionInputs')
])))
val_dataloader = dict(
batch_size=1,
num_workers=1,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type='RepeatDataset',
times=1,
dataset=dict(
type='RawframeDataset',
ann_file='dataset/val_rawdataset.txt',
data_prefix='dataset',
num_classes=10,
with_offset=True,
filename_tmpl='{:05}.jpg',
pipeline=[
dict(type='UniformSampleFrames', clip_len=40, test_mode=True),
dict(type='RawFrameDecode', io_backend='disk'),
dict(type='Resize', scale=(-1, 256)),
dict(type='CenterCrop', crop_size=224),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='PackActionInputs')
])))
test_dataloader = dict(
batch_size=1,
num_workers=1,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type='RawframeDataset',
ann_file='dataset/test_rawdataset.txt',
data_prefix='dataset',
num_classes=10,
with_offset=True,
filename_tmpl='{:05}.jpg',
pipeline=[
dict(type='UniformSampleFrames', clip_len=40, test_mode=True),
dict(type='RawFrameDecode', io_backend='disk'),
dict(type='Resize', scale=(-1, 224)),
dict(type='ThreeCrop', crop_size=224),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='PackActionInputs')
]))
val_evaluator = dict(type='AccMetric')
test_evaluator = dict(type='AccMetric')
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=100, val_begin=1, val_interval=3)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
base_lr = 0.0016
optim_wrapper = dict(
type='AmpOptimWrapper',
optimizer=dict(
type='AdamW', lr=0.0016, betas=(0.9, 0.999), weight_decay=0.05))
param_scheduler = [
dict(
type='LinearLR',
start_factor=0.1,
by_epoch=True,
begin=0,
end=30,
convert_to_iter_based=True),
dict(
type='CosineAnnealingLR',
T_max=70,
eta_min=1.6e-05,
by_epoch=True,
begin=30,
end=100,
convert_to_iter_based=True)
]
auto_scale_lr = dict(enable=False, base_batch_size=64)
work_dir = 'runs/'
launcher = 'pytorch'
randomness = dict(seed=None, diff_rank_seed=False, deterministic=False)
02/16 09:14:51 - mmengine - WARNING - The "visualizer" registry in mmaction did not set import location. Fallback to call `mmaction.utils.register_all_modules` instead.
02/16 09:14:51 - mmengine - WARNING - The "vis_backend" registry in mmaction did not set import location. Fallback to call `mmaction.utils.register_all_modules` instead.
02/16 09:14:51 - mmengine - WARNING - The "model" registry in mmaction did not set import location. Fallback to call `mmaction.utils.register_all_modules` instead.
02/16 09:14:53 - mmengine - WARNING - The "hook" registry in mmaction did not set import location. Fallback to call `mmaction.utils.register_all_modules` instead.
02/16 09:14:53 - mmengine - INFO - Hooks will be executed in the following order:
before_run:
(VERY_HIGH   ) RuntimeInfoHook                    
(BELOW_NORMAL) LoggerHook                         
-------------------- 
before_train:
(VERY_HIGH   ) RuntimeInfoHook                    
(NORMAL      ) IterTimerHook                      
(VERY_LOW    ) CheckpointHook                     
-------------------- 
before_train_epoch:
(VERY_HIGH   ) RuntimeInfoHook                    
(NORMAL      ) IterTimerHook                      
(NORMAL      ) DistSamplerSeedHook                
-------------------- 
before_train_iter:
(VERY_HIGH   ) RuntimeInfoHook                    
(NORMAL      ) IterTimerHook                      
-------------------- 
after_train_iter:
(VERY_HIGH   ) RuntimeInfoHook                    
(NORMAL      ) IterTimerHook                      
(BELOW_NORMAL) LoggerHook                         
(LOW         ) ParamSchedulerHook                 
(VERY_LOW    ) CheckpointHook                     
-------------------- 
after_train_epoch:
(NORMAL      ) IterTimerHook                      
(NORMAL      ) SyncBuffersHook                    
(LOW         ) ParamSchedulerHook                 
(VERY_LOW    ) CheckpointHook                     
-------------------- 
before_val_epoch:
(NORMAL      ) IterTimerHook                      
-------------------- 
before_val_iter:
(NORMAL      ) IterTimerHook                      
-------------------- 
after_val_iter:
(NORMAL      ) IterTimerHook                      
(BELOW_NORMAL) LoggerHook                         
-------------------- 
after_val_epoch:
(VERY_HIGH   ) RuntimeInfoHook                    
(NORMAL      ) IterTimerHook                      
(BELOW_NORMAL) LoggerHook                         
(LOW         ) ParamSchedulerHook                 
(VERY_LOW    ) CheckpointHook                     
-------------------- 
before_test_epoch:
(NORMAL      ) IterTimerHook                      
-------------------- 
before_test_iter:
(NORMAL      ) IterTimerHook                      
-------------------- 
after_test_iter:
(NORMAL      ) IterTimerHook                      
(BELOW_NORMAL) LoggerHook                         
-------------------- 
after_test_epoch:
(VERY_HIGH   ) RuntimeInfoHook                    
(NORMAL      ) IterTimerHook                      
(BELOW_NORMAL) LoggerHook                         
-------------------- 
after_run:
(BELOW_NORMAL) LoggerHook                         
-------------------- 
02/16 09:14:53 - mmengine - WARNING - The "loop" registry in mmaction did not set import location. Fallback to call `mmaction.utils.register_all_modules` instead.
02/16 09:14:53 - mmengine - WARNING - The "dataset" registry in mmaction did not set import location. Fallback to call `mmaction.utils.register_all_modules` instead.
Traceback (most recent call last):
File "/home/thomas/.pyenv/versions/miniconda3-3.10-22.11.1-1/lib/python3.10/site-packages/mmengine/registry/build_functions.py", line 121, in build_from_cfg
obj = obj_cls(**args)  # type: ignore
File "/home/thomas/proyek/mmaction2/mmaction/datasets/rawframe_dataset.py", line 99, in __init__
super().__init__(
File "/home/thomas/proyek/mmaction2/mmaction/datasets/base.py", line 48, in __init__
super().__init__(
File "/home/thomas/.pyenv/versions/miniconda3-3.10-22.11.1-1/lib/python3.10/site-packages/mmengine/dataset/base_dataset.py", line 241, in __init__
self._join_prefix()
File "/home/thomas/.pyenv/versions/miniconda3-3.10-22.11.1-1/lib/python3.10/site-packages/mmengine/dataset/base_dataset.py", line 538, in _join_prefix
for data_key, prefix in self.data_prefix.items():
AttributeError: 'str' object has no attribute 'items'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/thomas/.pyenv/versions/miniconda3-3.10-22.11.1-1/lib/python3.10/site-packages/mmengine/registry/build_functions.py", line 121, in build_from_cfg
obj = obj_cls(**args)  # type: ignore
File "/home/thomas/.pyenv/versions/miniconda3-3.10-22.11.1-1/lib/python3.10/site-packages/mmengine/dataset/dataset_wrapper.py", line 207, in __init__
self.dataset = DATASETS.build(dataset)
File "/home/thomas/.pyenv/versions/miniconda3-3.10-22.11.1-1/lib/python3.10/site-packages/mmengine/registry/registry.py", line 521, in build
return self.build_func(cfg, *args, **kwargs, registry=self)
File "/home/thomas/.pyenv/versions/miniconda3-3.10-22.11.1-1/lib/python3.10/site-packages/mmengine/registry/build_functions.py", line 135, in build_from_cfg
raise type(e)(
AttributeError: class `RawframeDataset` in mmaction/datasets/rawframe_dataset.py: 'str' object has no attribute 'items'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/thomas/.pyenv/versions/miniconda3-3.10-22.11.1-1/lib/python3.10/site-packages/mmengine/registry/build_functions.py", line 121, in build_from_cfg
obj = obj_cls(**args)  # type: ignore
File "/home/thomas/.pyenv/versions/miniconda3-3.10-22.11.1-1/lib/python3.10/site-packages/mmengine/runner/loops.py", line 43, in __init__
super().__init__(runner, dataloader)
File "/home/thomas/.pyenv/versions/miniconda3-3.10-22.11.1-1/lib/python3.10/site-packages/mmengine/runner/base_loop.py", line 26, in __init__
self.dataloader = runner.build_dataloader(
File "/home/thomas/.pyenv/versions/miniconda3-3.10-22.11.1-1/lib/python3.10/site-packages/mmengine/runner/runner.py", line 1333, in build_dataloader
dataset = DATASETS.build(dataset_cfg)
File "/home/thomas/.pyenv/versions/miniconda3-3.10-22.11.1-1/lib/python3.10/site-packages/mmengine/registry/registry.py", line 521, in build
return self.build_func(cfg, *args, **kwargs, registry=self)
File "/home/thomas/.pyenv/versions/miniconda3-3.10-22.11.1-1/lib/python3.10/site-packages/mmengine/registry/build_functions.py", line 135, in build_from_cfg
raise type(e)(
AttributeError: class `RepeatDataset` in mmengine/dataset/dataset_wrapper.py: class `RawframeDataset` in mmaction/datasets/rawframe_dataset.py: 'str' object has no attribute 'items'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/thomas/proyek/mmaction2/tools/train.py", line 141, in <module>
main()
File "/home/thomas/proyek/mmaction2/tools/train.py", line 137, in main
runner.train()
File "/home/thomas/.pyenv/versions/miniconda3-3.10-22.11.1-1/lib/python3.10/site-packages/mmengine/runner/runner.py", line 1656, in train
self._train_loop = self.build_train_loop(
File "/home/thomas/.pyenv/versions/miniconda3-3.10-22.11.1-1/lib/python3.10/site-packages/mmengine/runner/runner.py", line 1448, in build_train_loop
loop = LOOPS.build(
File "/home/thomas/.pyenv/versions/miniconda3-3.10-22.11.1-1/lib/python3.10/site-packages/mmengine/registry/registry.py", line 521, in build
return self.build_func(cfg, *args, **kwargs, registry=self)
File "/home/thomas/.pyenv/versions/miniconda3-3.10-22.11.1-1/lib/python3.10/site-packages/mmengine/registry/build_functions.py", line 135, in build_from_cfg
raise type(e)(
AttributeError: class `EpochBasedTrainLoop` in mmengine/runner/loops.py: class `RepeatDataset` in mmengine/dataset/dataset_wrapper.py: class `RawframeDataset` in mmaction/datasets/rawframe_dataset.py: 'str' object has no attribute 'items'
ERROR:torch.distributed.elastic.multiprocessing.api:failed (exitcode: 1) local_rank: 0 (pid: 2909183) of binary: /home/thomas/.pyenv/versions/miniconda3-3.10-22.11.1-1/bin/python
Traceback (most recent call last):
File "/home/thomas/.pyenv/versions/miniconda3-3.10-22.11.1-1/lib/python3.10/runpy.py", line 196, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/home/thomas/.pyenv/versions/miniconda3-3.10-22.11.1-1/lib/python3.10/runpy.py", line 86, in _run_code
exec(code, run_globals)
File "/home/thomas/.pyenv/versions/miniconda3-3.10-22.11.1-1/lib/python3.10/site-packages/torch/distributed/launch.py", line 195, in <module>
main()
File "/home/thomas/.pyenv/versions/miniconda3-3.10-22.11.1-1/lib/python3.10/site-packages/torch/distributed/launch.py", line 191, in main
launch(args)
File "/home/thomas/.pyenv/versions/miniconda3-3.10-22.11.1-1/lib/python3.10/site-packages/torch/distributed/launch.py", line 176, in launch
run(args)
File "/home/thomas/.pyenv/versions/miniconda3-3.10-22.11.1-1/lib/python3.10/site-packages/torch/distributed/run.py", line 753, in run
elastic_launch(
File "/home/thomas/.pyenv/versions/miniconda3-3.10-22.11.1-1/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 132, in __call__
return launch_agent(self._config, self._entrypoint, list(args))
File "/home/thomas/.pyenv/versions/miniconda3-3.10-22.11.1-1/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 246, in launch_agent
raise ChildFailedError(
torch.distributed.elastic.multiprocessing.errors.ChildFailedError: 
============================================================
/home/thomas/proyek/mmaction2/tools/train.py FAILED
------------------------------------------------------------
Failures:
<NO_OTHER_FAILURES>
------------------------------------------------------------
Root Cause (first observed failure):
[0]:
time      : 2023-02-16_09:14:54
host      : RYZEN9
rank      : 0 (local_rank: 0)
exitcode  : 1 (pid: 2909183)
error_file: <N/A>
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
============================================================

Answer 1

Score: 0

I have found the culprit: replace data_prefix=data_root_xxx with data_prefix=dict(img=data_root_xxx). In mmengine 1.x, BaseDataset expects data_prefix to be a dict (its _join_prefix method iterates data_prefix.items()), so passing a plain string raises the AttributeError above.
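To illustrate the change, here is a minimal before/after sketch of one dataset block (only data_prefix differs; the field values mirror the working config below):

# 0.x style: data_prefix as a plain string. In 1.x, BaseDataset._join_prefix
# calls data_prefix.items(), so this raises the AttributeError seen in the log.
broken = dict(
    type='RawframeDataset',
    ann_file='dataset/train_rawdataset.txt',
    data_prefix='dataset',
    filename_tmpl='{:05}.jpg',
    with_offset=True,
    pipeline=[])

# 1.x style: data_prefix as a dict; the 'img' key points at the rawframe root.
fixed = dict(
    type='RawframeDataset',
    ann_file='dataset/train_rawdataset.txt',
    data_prefix=dict(img='dataset'),
    filename_tmpl='{:05}.jpg',
    with_offset=True,
    pipeline=[])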

This is the working config:

# something_mvit.py

_base_ = [
    'mmaction2/configs/_base_/models/mvit_small.py', 'mmaction2/configs/_base_/default_runtime.py'
]

repeat_times = 1
num_classes = 10
batch_size = 1

# model settings
model = dict(
    backbone=dict(
        arch='large',
        temporal_size=40,
        spatial_size=312,
        drop_path_rate=0.75,
    ),
    data_preprocessor=dict(
        type='ActionDataPreprocessor',
        mean=[114.75, 114.75, 114.75],
        std=[57.375, 57.375, 57.375],
        blending=dict(
            type='RandomBatchAugment',
            augments=[
                dict(type='MixupBlending', alpha=0.8, num_classes=400),
                dict(type='CutmixBlending', alpha=1, num_classes=400)
            ]),
        format_shape='NCTHW'),
    cls_head=dict(in_channels=1152, num_classes=num_classes),
    test_cfg=dict(max_testing_views=5))

# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'dataset'
data_root_val = 'dataset'
ann_file_train = 'dataset/train_rawdataset.txt'
ann_file_val = 'dataset/val_rawdataset.txt'
ann_file_test = 'dataset/test_rawdataset.txt'

file_client_args = dict(io_backend='disk')
train_pipeline = [
    dict(type='UniformSampleFrames', clip_len=40),
    dict(type='RawFrameDecode', **file_client_args),
    dict(type='Resize', scale=(-1, 256)),
    dict(
        type='PytorchVideoWrapper',
        op='RandAugment',
        magnitude=7,
        num_layers=4),
    dict(type='RandomErasing', erase_prob=0.25, mode='rand'),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='PackActionInputs')
]
val_pipeline = [
    dict(type='UniformSampleFrames', clip_len=40, test_mode=True),
    dict(type='RawFrameDecode', **file_client_args),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='PackActionInputs')
]
test_pipeline = [
    dict(type='UniformSampleFrames', clip_len=40, test_mode=True),
    dict(type='RawFrameDecode', **file_client_args),
    dict(type='Resize', scale=(-1, 224)),
    dict(type='ThreeCrop', crop_size=224),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='PackActionInputs')
]
with_offset = True
filename_tmpl = '{:05}.jpg'
train_dataloader = dict(
    batch_size=batch_size,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=repeat_times,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file_train,
            data_prefix=dict(img=data_root),
            num_classes=num_classes,
            with_offset=with_offset,
            filename_tmpl=filename_tmpl,
            pipeline=train_pipeline)))
val_dataloader = dict(
    batch_size=batch_size,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type='RepeatDataset',
        times=repeat_times,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file_val,
            data_prefix=dict(img=data_root_val),
            num_classes=num_classes,
            with_offset=with_offset,
            filename_tmpl=filename_tmpl,
            pipeline=val_pipeline,
            test_mode=True)))
test_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=dict(img=data_root_val),
        num_classes=num_classes,
        with_offset=with_offset,
        filename_tmpl=filename_tmpl,
        pipeline=test_pipeline,
        test_mode=True))
val_evaluator = dict(type='AccMetric')
test_evaluator = val_evaluator

train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=100, val_begin=1, val_interval=3)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

base_lr = 1.6e-3
optim_wrapper = dict(
    type='AmpOptimWrapper',
    optimizer=dict(
        type='AdamW', lr=base_lr, betas=(0.9, 0.999), weight_decay=0.05))

param_scheduler = [
    dict(
        type='LinearLR',
        start_factor=0.1,
        by_epoch=True,
        begin=0,
        end=30,
        convert_to_iter_based=True),
    dict(
        type='CosineAnnealingLR',
        T_max=70,
        eta_min=base_lr / 100,
        by_epoch=True,
        begin=30,
        end=100,
        convert_to_iter_based=True)
]

default_hooks = dict(
    checkpoint=dict(interval=3, max_keep_ckpts=5), logger=dict(interval=10))

# Default setting for scaling LR automatically
#   - `enable` means enable scaling LR automatically
#       or not by default.
#   - `base_batch_size` = (8 GPUs) x (8 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=64)
# runtime settings
# checkpoint_config = dict(interval=5)
work_dir = 'runs/'
