Pretrained model links: https://github.com/microsoft/Swin-Transformer?tab=readme-ov-file
Model hyperparameters: https://github.com/microsoft/Swin-Transformer/blob/main/configs/swin/swin_tiny_patch4_window7_224_22k.yaml
Training launcher script:
# Minimal training entry point — load the config, set the work dir, and train.
from mmengine.config import Config
from mmdet.utils import register_all_modules
from mmengine.runner import Runner

# Register all mmdet components (models, datasets, transforms) with mmengine's registries
# so they can be looked up by the string `type` names used in the config file.
register_all_modules()

config = Config.fromfile('/data/ephemeral/home/hobbang/level2-objectdetection-cv-16/work_dirs/configs/hihi.py')
# Checkpoints and logs are written under this directory.
config.work_dir = '/data/ephemeral/home/hobbang/level2-objectdetection-cv-16/work_dirs/swin'

# Build the runner from the config and start the training loop.
Runner.from_cfg(config).train()
# mmengine config inheritance: `_base_` pulls in the full base config, and any
# keys set below override only the matching entries — everything else is inherited.
_base_ = '/data/ephemeral/home/hobbang/level2-objectdetection-cv-16/baseline/mmdetection/configs/swin/retinanet_swin-t-p4-w7_fpn_1x_coco.py'
model = dict(
backbone=dict(
# Freeze the Swin backbone's LayerNorm parameters (no gradient updates).
norm_cfg=dict(type='LN', requires_grad=False)
),
bbox_head=dict(
# 10 object categories for this competition dataset (vs. 80 for COCO in the base config).
num_classes=10
)
)
train_dataloader = dict(
batch_size=2
)
# Base learning rate for the optimizer; the schedule itself comes from the base config.
optim_wrapper = dict(optimizer=dict(lr=0.001))
# The sharp LR rise at the start of training is expected — it is the LinearLR warmup.
coco_detection.py — for the dataset settings, editing this original config file directly seems most convenient:
# dataset settings
dataset_type = 'CocoDataset'
data_root = '/data/ephemeral/home/hobbang/level2-objectdetection-cv-16/dataset/'
# Class names for the 10-category trash-detection dataset; injected into each
# dataset below via `metainfo` so CocoDataset maps annotation ids to these names.
classes = ("General trash", "Paper", "Paper pack", "Metal", "Glass", "Plastic", "Styrofoam", "Plastic bag", "Battery", "Clothing")
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# })
# Use the local filesystem (default backend).
backend_args = None
# Training-time pipeline: load + annotations + resize + random horizontal flip.
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
# Evaluation-time pipeline: deterministic (no flip); annotations loaded so
# CocoMetric can compute mAP against ground truth.
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
# If you don't have a gt annotation, delete the pipeline
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
# Groups images of similar aspect ratio into the same batch to reduce padding.
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='train.json',
# Image paths in the annotation file are already relative to data_root.
data_prefix=dict(img=''),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args,
metainfo=dict(classes=classes)
))
# NOTE(review): validation reads the same train.json as training — there is no
# held-out split, so the val mAP measures fit to the training set. Confirm intentional.
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='train.json',
data_prefix=dict(img=''),
metainfo=dict(classes=classes),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'train.json',
metric='bbox',
format_only=False,
backend_args=backend_args,
# classwise=True # classwise: whether to report the metric per class. True enables per-class analysis; False gives only the aggregate score.
)
# test_evaluator = ~~
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=2,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# data_prefix=dict(img='test2017/'),
# test_mode=True,
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CocoMetric',
# metric='bbox',
# format_only=True,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# outfile_prefix='./work_dirs/coco_detection/test')