update configs & demo & add bottom-up models
jin-s13 authored and innerlee committed Jul 12, 2020
1 parent 925741f commit ea5a0fc
Showing 37 changed files with 2,743 additions and 261 deletions.
1 change: 1 addition & 0 deletions .gitlab-ci.yml
@@ -29,6 +29,7 @@ test:
- pip install pillow==6.2.2
- pip install scipy
- pip install json_tricks
- pip install munkres
- pip install git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI
- pip install -e .
- python -c "import mmpose; print(mmpose.__version__)"
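The newly added munkres dependency provides the Hungarian (Kuhn-Munkres) assignment algorithm, which the bottom-up models introduced in this commit rely on for matching/grouping; the exact call site is not visible in this diff, so the snippet below is only a minimal sketch of the package's API with a made-up cost matrix.

# Minimal sketch of the munkres (Hungarian algorithm) API that the new
# CI dependency installs; the cost matrix here is purely illustrative.
from munkres import Munkres

cost_matrix = [
    [4, 2, 8],
    [4, 3, 7],
    [3, 1, 6],
]
assignment = Munkres().compute(cost_matrix)  # optimal (row, col) pairs
total_cost = sum(cost_matrix[r][c] for r, c in assignment)
print(assignment, total_cost)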
209 changes: 209 additions & 0 deletions configs/bottom_up/higherhrnet/coco/higher_hrnet32_coco_512x512.py
@@ -0,0 +1,209 @@
log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=299, metric='mAP')

optimizer = dict(
type='Adam',
lr=0.0015,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[200, 260])
total_epochs = 300
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])

channel_cfg = dict(
dataset_joints=17,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
],
inference_channel=[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
])

data_cfg = dict(
image_size=512,
base_size=256,
base_sigma=2,
heatmap_size=[128, 256],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
num_scales=2,
scale_aware_sigma=False,
)

# model settings
model = dict(
type='BottomUp',
pretrained='models/pytorch/imagenet/hrnet_w32-36af842e.pth',
backbone=dict(
type='HRNet',
in_channels=3,
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256))),
),
keypoint_head=dict(
type='BottomUpHigherResolutionHead',
in_channels=32,
num_joints=17,
tag_per_joint=True,
        extra=dict(final_conv_kernel=1, ),
num_deconv_layers=1,
num_deconv_filters=[32],
num_deconv_kernels=[4],
num_basic_blocks=4,
cat_output=[True],
with_ae_loss=[True, False]),
train_cfg=dict(
num_joints=channel_cfg['dataset_joints'],
img_size=data_cfg['image_size']),
test_cfg=dict(
num_joints=channel_cfg['dataset_joints'],
max_num_people=30,
scale_factor=[1],
with_heatmaps=[True, True],
with_ae=[True, False],
project2image=True,
nms_kernel=5,
nms_padding=2,
tag_per_joint=True,
detection_threshold=0.1,
tag_threshold=1,
use_detection_val=True,
ignore_too_much=False,
adjust=True,
refine=True,
flip_test=True),
loss_pose=dict(
type='MultiLossFactory',
num_joints=17,
num_stages=2,
ae_loss_type='exp',
with_ae_loss=[True, False],
push_loss_factor=[0.001, 0.001],
pull_loss_factor=[0.001, 0.001],
with_heatmaps_loss=[True, True],
heatmaps_loss_factor=[1.0, 1.0],
),
)

train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='BottomUpRandomAffine',
rot_factor=30,
scale_factor=[0.75, 1.5],
scale_type='short',
trans_factor=40),
dict(type='BottomUpRandomFlip', flip_prob=0.5),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='BottomUpGenerateTarget',
sigma=2,
max_num_people=30,
),
dict(
type='Collect',
keys=['img', 'joints', 'targets', 'masks'],
meta_keys=[]),
]

valid_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='BottomUpGetImgSize', test_scale_factor=[1]),
dict(
type='BottomUpResizeAlign',
transforms=[
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
]),
dict(
type='Collect',
keys=[
'img',
],
meta_keys=[
'image_file', 'aug_data', 'test_scale_factor', 'base_size',
'center', 'scale', 'flip_index'
]),
]

data_root = 'data/coco'
data = dict(
samples_per_gpu=24,
workers_per_gpu=1,
train=dict(
type='BottomUpCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
img_prefix=f'{data_root}/train2017/',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='BottomUpCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=valid_pipeline),
test=dict(
type='BottomUpCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=valid_pipeline),
)

loss = dict(
type='MultiLossFactory',
    num_joints=17,
    num_stages=2,
ae_loss_type='exp',
with_ae_loss=[True, False],
push_loss_factor=[0.001, 0.001],
pull_loss_factor=[0.001, 0.001],
with_heatmaps_loss=[True, True],
heatmaps_loss_factor=[1.0, 1.0],
)
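
As a quick sanity check, the new bottom-up config can be loaded with mmcv and inspected before launching a run; the tools/train.py entry point mentioned in the final comment is the usual OpenMMLab launcher and is assumed here rather than shown in this diff.

# Sketch: load the new bottom-up config and inspect a few of its fields.
from mmcv import Config

cfg = Config.fromfile(
    'configs/bottom_up/higherhrnet/coco/higher_hrnet32_coco_512x512.py')
print(cfg.model.keypoint_head.type)  # BottomUpHigherResolutionHead
print(cfg.data_cfg.image_size)       # 512 (input resolution)
print(cfg.data_cfg.heatmap_size)     # [128, 256], i.e. two output scales
# A single-GPU training run would then typically be started with:
#   python tools/train.py configs/bottom_up/higherhrnet/coco/higher_hrnet32_coco_512x512.py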
22 changes: 14 additions & 8 deletions configs/top_down/hrnet/coco/hrnet_w32_coco_256x192.py
@@ -70,11 +70,11 @@
num_channels=(32, 64, 128, 256))),
),
keypoint_head=dict(
type='SimpleHead',
type='TopDownSimpleHead',
in_channels=32,
out_channels=channel_cfg['num_output_channels'],
num_deconv_layers=0,
extra=dict(final_conv_kerne=1, ),
extra=dict(final_conv_kernel=1, ),
),
train_cfg=dict(),
test_cfg=dict(
@@ -105,16 +105,20 @@

train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='RandomFlip', flip_prob=0.5),
dict(type='HalfBodyTransform', num_joints_half_body=8, prob_half_body=0.3),
dict(type='RandomScaleRotation', rot_factor=40, scale_factor=0.5),
dict(type='AffineTransform'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownHalfBodyTransform',
num_joints_half_body=8,
prob_half_body=0.3),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(type='GenerateTarget', sigma=2),
dict(type='TopDownGenerateTarget', sigma=2),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
@@ -126,7 +130,7 @@

valid_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='AffineTransform'),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
@@ -143,6 +147,8 @@
]),
]

test_pipeline = valid_pipeline

data_root = 'data/coco'
data = dict(
samples_per_gpu=64,
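The top-down config diffs above and below rename the head and every pipeline transform to TopDown*-prefixed classes; for anyone carrying a custom config across this change, a small rename helper (purely illustrative, not part of MMPose) might look like this:

# Hypothetical migration helper: map the old type names (left) to the
# renamed classes (right) exactly as this commit does in its own configs.
RENAMED = {
    'SimpleHead': 'TopDownSimpleHead',
    'RandomFlip': 'TopDownRandomFlip',
    'HalfBodyTransform': 'TopDownHalfBodyTransform',
    'RandomScaleRotation': 'TopDownGetRandomScaleRotation',
    'AffineTransform': 'TopDownAffine',
    'GenerateTarget': 'TopDownGenerateTarget',
}

def migrate_pipeline(pipeline):
    # Return a copy of a config pipeline with transform types renamed.
    return [dict(step, type=RENAMED.get(step['type'], step['type']))
            for step in pipeline]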
18 changes: 11 additions & 7 deletions configs/top_down/resnet/coco/res50_coco_256x192.py
@@ -42,7 +42,7 @@
pretrained='models/pytorch/imagenet/resnet50-19c8e357.pth',
backbone=dict(type='ResNet', depth=50),
keypoint_head=dict(
type='SimpleHead',
type='TopDownSimpleHead',
in_channels=2048,
out_channels=channel_cfg['num_output_channels'],
),
@@ -75,16 +75,20 @@

train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='RandomFlip', flip_prob=0.5),
dict(type='HalfBodyTransform', num_joints_half_body=8, prob_half_body=0.3),
dict(type='RandomScaleRotation', rot_factor=40, scale_factor=0.5),
dict(type='AffineTransform'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownHalfBodyTransform',
num_joints_half_body=8,
prob_half_body=0.3),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(type='GenerateTarget', sigma=2),
dict(type='TopDownGenerateTarget', sigma=2),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
@@ -96,7 +100,7 @@

valid_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='AffineTransform'),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',