From 23ef988911b3b68fe8d5b21225995d6b392ee7b6 Mon Sep 17 00:00:00 2001 From: luxin Date: Thu, 13 Jun 2019 20:57:19 +0800 Subject: [PATCH] push code --- .gitignore | 111 +++ GETTING_STARTED.md | 256 ++++++ INSTALL.md | 98 ++ LICENSE | 201 ++++ MODEL_ZOO.md | 455 +++++++++ README.md | 111 +++ TECHNICAL_DETAILS.md | 99 ++ compile.sh | 38 + configs/cascade_mask_rcnn_r101_fpn_1x.py | 234 +++++ configs/cascade_mask_rcnn_r50_caffe_c4_1x.py | 236 +++++ configs/cascade_mask_rcnn_r50_fpn_1x.py | 234 +++++ .../cascade_mask_rcnn_x101_32x4d_fpn_1x.py | 236 +++++ .../cascade_mask_rcnn_x101_64x4d_fpn_1x.py | 236 +++++ configs/cascade_rcnn_r101_fpn_1x.py | 217 +++++ configs/cascade_rcnn_r50_caffe_c4_1x.py | 226 +++++ configs/cascade_rcnn_r50_fpn_1x.py | 217 +++++ configs/cascade_rcnn_x101_32x4d_fpn_1x.py | 219 +++++ configs/cascade_rcnn_x101_64x4d_fpn_1x.py | 219 +++++ configs/dcn/README.md | 43 + ...ascade_mask_rcnn_dconv_c3-c5_r50_fpn_1x.py | 237 +++++ .../cascade_rcnn_dconv_c3-c5_r50_fpn_1x.py | 220 +++++ .../dcn/faster_rcnn_dconv_c3-c5_r50_fpn_1x.py | 166 ++++ ...ster_rcnn_dconv_c3-c5_x101_32x4d_fpn_1x.py | 171 ++++ configs/dcn/faster_rcnn_dpool_r50_fpn_1x.py | 169 ++++ .../faster_rcnn_mdconv_c3-c5_r50_fpn_1x.py | 166 ++++ configs/dcn/faster_rcnn_mdpool_r50_fpn_1x.py | 169 ++++ .../dcn/mask_rcnn_dconv_c3-c5_r50_fpn_1x.py | 178 ++++ configs/fast_mask_rcnn_r101_fpn_1x.py | 137 +++ configs/fast_mask_rcnn_r50_caffe_c4_1x.py | 141 +++ configs/fast_mask_rcnn_r50_fpn_1x.py | 137 +++ configs/fast_rcnn_r101_fpn_1x.py | 122 +++ configs/fast_rcnn_r50_caffe_c4_1x.py | 130 +++ configs/fast_rcnn_r50_fpn_1x.py | 122 +++ configs/faster_rcnn_ohem_r50_fpn_1x.py | 163 ++++ configs/faster_rcnn_r101_fpn_1x.py | 163 ++++ configs/faster_rcnn_r50_caffe_c4_1x.py | 168 ++++ configs/faster_rcnn_r50_fpn_1x.py | 163 ++++ configs/faster_rcnn_x101_32x4d_fpn_1x.py | 165 ++++ configs/faster_rcnn_x101_64x4d_fpn_1x.py | 165 ++++ configs/fcos/README.md | 25 + ...train_640_800_r101_caffe_fpn_gn_2x_4gpu.py | 124 +++ ...os_mstrain_640_800_x101_64x4d_fpn_gn_2x.py | 125 +++ configs/fcos/fcos_r50_caffe_fpn_gn_1x_4gpu.py | 123 +++ configs/gn+ws/README.md | 54 ++ configs/gn+ws/faster_rcnn_r50_fpn_gn_ws_1x.py | 170 ++++ .../mask_rcnn_r50_fpn_gn_ws_20_23_24e.py | 187 ++++ configs/gn+ws/mask_rcnn_r50_fpn_gn_ws_2x.py | 187 ++++ .../mask_rcnn_x101_32x4d_fpn_gn_ws_2x.py | 189 ++++ configs/gn/README.md | 28 + configs/gn/mask_rcnn_r101_fpn_gn_2x.py | 184 ++++ configs/gn/mask_rcnn_r50_fpn_gn_2x.py | 184 ++++ configs/gn/mask_rcnn_r50_fpn_gn_contrib_2x.py | 184 ++++ configs/grid_rcnn_r50_fpn_2x.py | 177 ++++ configs/htc/README.md | 55 ++ ...-c5_mstrain_400_1400_x101_64x4d_fpn_20e.py | 263 ++++++ configs/htc/htc_r101_fpn_20e.py | 254 +++++ configs/htc/htc_r50_fpn_1x.py | 254 +++++ configs/htc/htc_r50_fpn_20e.py | 254 +++++ .../htc/htc_without_semantic_r50_fpn_1x.py | 236 +++++ configs/htc/htc_x101_32x4d_fpn_20e_16gpu.py | 256 ++++++ configs/htc/htc_x101_64x4d_fpn_20e_16gpu.py | 256 ++++++ configs/mask_rcnn_r101_fpn_1x.py | 175 ++++ configs/mask_rcnn_r50_caffe_c4_1x.py | 179 ++++ configs/mask_rcnn_r50_fpn_1x.py | 175 ++++ configs/mask_rcnn_x101_32x4d_fpn_1x.py | 177 ++++ configs/mask_rcnn_x101_64x4d_fpn_1x.py | 177 ++++ .../faster_rcnn_r50_fpn_1x_voc0712.py | 164 ++++ configs/pascal_voc/ssd300_voc.py | 134 +++ configs/pascal_voc/ssd512_voc.py | 134 +++ configs/retinanet_r101_fpn_1x.py | 120 +++ configs/retinanet_r50_fpn_1x.py | 120 +++ configs/retinanet_x101_32x4d_fpn_1x.py | 122 +++ configs/retinanet_x101_64x4d_fpn_1x.py | 122 +++ 
configs/rpn_r101_fpn_1x.py | 121 +++ configs/rpn_r50_caffe_c4_1x.py | 121 +++ configs/rpn_r50_fpn_1x.py | 121 +++ configs/rpn_x101_32x4d_fpn_1x.py | 123 +++ configs/rpn_x101_64x4d_fpn_1x.py | 123 +++ configs/ssd300_coco.py | 131 +++ configs/ssd512_coco.py | 131 +++ demo/coco_test_12510.jpg | Bin 0 -> 183096 bytes mmdet/__init__.py | 3 + mmdet/apis/__init__.py | 8 + mmdet/apis/env.py | 69 ++ mmdet/apis/inference.py | 143 +++ mmdet/apis/train.py | 197 ++++ mmdet/core/__init__.py | 7 + mmdet/core/anchor/__init__.py | 4 + mmdet/core/anchor/anchor_generator.py | 84 ++ mmdet/core/anchor/anchor_target.py | 186 ++++ mmdet/core/bbox/__init__.py | 20 + mmdet/core/bbox/assign_sampling.py | 33 + mmdet/core/bbox/assigners/__init__.py | 5 + mmdet/core/bbox/assigners/assign_result.py | 19 + mmdet/core/bbox/assigners/base_assigner.py | 8 + mmdet/core/bbox/assigners/max_iou_assigner.py | 152 +++ mmdet/core/bbox/bbox_target.py | 73 ++ mmdet/core/bbox/geometry.py | 63 ++ mmdet/core/bbox/samplers/__init__.py | 14 + mmdet/core/bbox/samplers/base_sampler.py | 78 ++ mmdet/core/bbox/samplers/combined_sampler.py | 16 + .../samplers/instance_balanced_pos_sampler.py | 41 + .../bbox/samplers/iou_balanced_neg_sampler.py | 62 ++ mmdet/core/bbox/samplers/ohem_sampler.py | 73 ++ mmdet/core/bbox/samplers/pseudo_sampler.py | 26 + mmdet/core/bbox/samplers/random_sampler.py | 53 ++ mmdet/core/bbox/samplers/sampling_result.py | 24 + mmdet/core/bbox/transforms.py | 180 ++++ mmdet/core/evaluation/__init__.py | 18 + mmdet/core/evaluation/bbox_overlaps.py | 49 + mmdet/core/evaluation/class_names.py | 103 +++ mmdet/core/evaluation/coco_utils.py | 149 +++ mmdet/core/evaluation/eval_hooks.py | 162 ++++ mmdet/core/evaluation/mean_ap.py | 378 ++++++++ mmdet/core/evaluation/recall.py | 185 ++++ mmdet/core/loss/__init__.py | 12 + mmdet/core/loss/losses.py | 143 +++ mmdet/core/mask/__init__.py | 4 + mmdet/core/mask/grid_target.py | 82 ++ mmdet/core/mask/mask_target.py | 36 + mmdet/core/mask/utils.py | 30 + mmdet/core/post_processing/__init__.py | 8 + mmdet/core/post_processing/bbox_nms.py | 64 ++ mmdet/core/post_processing/merge_augs.py | 96 ++ mmdet/core/utils/__init__.py | 7 + mmdet/core/utils/dist_utils.py | 57 ++ mmdet/core/utils/misc.py | 37 + mmdet/datasets/__init__.py | 16 + mmdet/datasets/coco.py | 118 +++ mmdet/datasets/concat_dataset.py | 22 + mmdet/datasets/custom.py | 322 +++++++ mmdet/datasets/extra_aug.py | 163 ++++ mmdet/datasets/loader/__init__.py | 4 + mmdet/datasets/loader/build_loader.py | 46 + mmdet/datasets/loader/sampler.py | 159 ++++ mmdet/datasets/repeat_dataset.py | 19 + mmdet/datasets/transforms.py | 147 +++ mmdet/datasets/utils.py | 116 +++ mmdet/datasets/voc.py | 18 + mmdet/datasets/xml_style.py | 76 ++ mmdet/models/__init__.py | 18 + mmdet/models/anchor_heads/__init__.py | 7 + mmdet/models/anchor_heads/anchor_head.py | 284 ++++++ mmdet/models/anchor_heads/fcos_head.py | 371 ++++++++ mmdet/models/anchor_heads/retina_head.py | 88 ++ mmdet/models/anchor_heads/rpn_head.py | 104 +++ mmdet/models/anchor_heads/ssd_head.py | 191 ++++ mmdet/models/backbones/__init__.py | 5 + mmdet/models/backbones/resnet.py | 474 ++++++++++ mmdet/models/backbones/resnext.py | 218 +++++ mmdet/models/backbones/ssd_vgg.py | 130 +++ mmdet/models/bbox_heads/__init__.py | 4 + mmdet/models/bbox_heads/bbox_head.py | 208 +++++ mmdet/models/bbox_heads/convfc_bbox_head.py | 246 +++++ mmdet/models/builder.py | 56 ++ mmdet/models/detectors/__init__.py | 18 + mmdet/models/detectors/base.py | 140 +++ mmdet/models/detectors/cascade_rcnn.py | 379 
++++++++ mmdet/models/detectors/fast_rcnn.py | 50 + mmdet/models/detectors/faster_rcnn.py | 27 + mmdet/models/detectors/fcos.py | 16 + mmdet/models/detectors/grid_rcnn.py | 179 ++++ mmdet/models/detectors/htc.py | 396 ++++++++ mmdet/models/detectors/mask_rcnn.py | 31 + mmdet/models/detectors/retinanet.py | 16 + mmdet/models/detectors/rpn.py | 92 ++ mmdet/models/detectors/single_stage.py | 70 ++ mmdet/models/detectors/test_mixins.py | 153 ++++ mmdet/models/detectors/two_stage.py | 240 +++++ mmdet/models/mask_heads/__init__.py | 6 + mmdet/models/mask_heads/fcn_mask_head.py | 169 ++++ .../models/mask_heads/fused_semantic_head.py | 102 +++ mmdet/models/mask_heads/grid_head.py | 174 ++++ mmdet/models/mask_heads/htc_mask_head.py | 38 + mmdet/models/necks/__init__.py | 3 + mmdet/models/necks/fpn.py | 136 +++ mmdet/models/registry.py | 44 + mmdet/models/roi_extractors/__init__.py | 3 + mmdet/models/roi_extractors/single_level.py | 88 ++ mmdet/models/shared_heads/__init__.py | 3 + mmdet/models/shared_heads/res_layer.py | 69 ++ mmdet/models/utils/__init__.py | 12 + mmdet/models/utils/conv_module.py | 163 ++++ mmdet/models/utils/conv_ws.py | 46 + mmdet/models/utils/norm.py | 55 ++ mmdet/models/utils/scale.py | 12 + mmdet/models/utils/weight_init.py | 46 + mmdet/ops/__init__.py | 16 + mmdet/ops/dcn/__init__.py | 13 + mmdet/ops/dcn/functions/__init__.py | 0 mmdet/ops/dcn/functions/deform_conv.py | 181 ++++ mmdet/ops/dcn/functions/deform_pool.py | 69 ++ mmdet/ops/dcn/modules/__init__.py | 0 mmdet/ops/dcn/modules/deform_conv.py | 157 ++++ mmdet/ops/dcn/modules/deform_pool.py | 172 ++++ mmdet/ops/dcn/setup.py | 15 + mmdet/ops/dcn/src/deform_conv_cuda.cpp | 695 ++++++++++++++ mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu | 866 ++++++++++++++++++ mmdet/ops/dcn/src/deform_pool_cuda.cpp | 87 ++ mmdet/ops/dcn/src/deform_pool_cuda_kernel.cu | 364 ++++++++ mmdet/ops/nms/__init__.py | 3 + mmdet/ops/nms/nms_wrapper.py | 78 ++ mmdet/ops/nms/setup.py | 84 ++ mmdet/ops/nms/src/nms_cpu.cpp | 71 ++ mmdet/ops/nms/src/nms_cuda.cpp | 17 + mmdet/ops/nms/src/nms_kernel.cu | 131 +++ mmdet/ops/nms/src/soft_nms_cpu.pyx | 127 +++ mmdet/ops/roi_align/__init__.py | 4 + mmdet/ops/roi_align/functions/__init__.py | 0 mmdet/ops/roi_align/functions/roi_align.py | 61 ++ mmdet/ops/roi_align/gradcheck.py | 29 + mmdet/ops/roi_align/modules/__init__.py | 0 mmdet/ops/roi_align/modules/roi_align.py | 16 + mmdet/ops/roi_align/setup.py | 12 + mmdet/ops/roi_align/src/roi_align_cuda.cpp | 85 ++ mmdet/ops/roi_align/src/roi_align_kernel.cu | 294 ++++++ mmdet/ops/roi_pool/__init__.py | 4 + mmdet/ops/roi_pool/functions/__init__.py | 0 mmdet/ops/roi_pool/functions/roi_pool.py | 55 ++ mmdet/ops/roi_pool/gradcheck.py | 15 + mmdet/ops/roi_pool/modules/__init__.py | 0 mmdet/ops/roi_pool/modules/roi_pool.py | 14 + mmdet/ops/roi_pool/setup.py | 12 + mmdet/ops/roi_pool/src/roi_pool_cuda.cpp | 86 ++ mmdet/ops/roi_pool/src/roi_pool_kernel.cu | 156 ++++ mmdet/ops/sigmoid_focal_loss/__init__.py | 3 + .../sigmoid_focal_loss/functions/__init__.py | 0 .../functions/sigmoid_focal_loss.py | 42 + .../sigmoid_focal_loss/modules/__init__.py | 0 .../modules/sigmoid_focal_loss.py | 23 + mmdet/ops/sigmoid_focal_loss/setup.py | 12 + .../src/sigmoid_focal_loss.cpp | 43 + .../src/sigmoid_focal_loss_cuda.cu | 169 ++++ setup.py | 112 +++ tools/analyze_logs.py | 178 ++++ tools/coco_eval.py | 28 + tools/convert_datasets/pascal_voc.py | 140 +++ tools/dist_test.sh | 10 + tools/dist_train.sh | 9 + tools/publish_model.py | 34 + tools/slurm_test.sh | 23 + tools/slurm_train.sh | 23 + 
tools/test.py | 189 ++++ tools/train.py | 95 ++ tools/upgrade_model_version.py | 42 + tools/voc_eval.py | 62 ++ 246 files changed, 28470 insertions(+) create mode 100644 .gitignore create mode 100644 GETTING_STARTED.md create mode 100644 INSTALL.md create mode 100644 LICENSE create mode 100644 MODEL_ZOO.md create mode 100644 README.md create mode 100644 TECHNICAL_DETAILS.md create mode 100755 compile.sh create mode 100644 configs/cascade_mask_rcnn_r101_fpn_1x.py create mode 100644 configs/cascade_mask_rcnn_r50_caffe_c4_1x.py create mode 100644 configs/cascade_mask_rcnn_r50_fpn_1x.py create mode 100644 configs/cascade_mask_rcnn_x101_32x4d_fpn_1x.py create mode 100644 configs/cascade_mask_rcnn_x101_64x4d_fpn_1x.py create mode 100644 configs/cascade_rcnn_r101_fpn_1x.py create mode 100644 configs/cascade_rcnn_r50_caffe_c4_1x.py create mode 100644 configs/cascade_rcnn_r50_fpn_1x.py create mode 100644 configs/cascade_rcnn_x101_32x4d_fpn_1x.py create mode 100644 configs/cascade_rcnn_x101_64x4d_fpn_1x.py create mode 100644 configs/dcn/README.md create mode 100644 configs/dcn/cascade_mask_rcnn_dconv_c3-c5_r50_fpn_1x.py create mode 100644 configs/dcn/cascade_rcnn_dconv_c3-c5_r50_fpn_1x.py create mode 100644 configs/dcn/faster_rcnn_dconv_c3-c5_r50_fpn_1x.py create mode 100644 configs/dcn/faster_rcnn_dconv_c3-c5_x101_32x4d_fpn_1x.py create mode 100644 configs/dcn/faster_rcnn_dpool_r50_fpn_1x.py create mode 100644 configs/dcn/faster_rcnn_mdconv_c3-c5_r50_fpn_1x.py create mode 100644 configs/dcn/faster_rcnn_mdpool_r50_fpn_1x.py create mode 100644 configs/dcn/mask_rcnn_dconv_c3-c5_r50_fpn_1x.py create mode 100644 configs/fast_mask_rcnn_r101_fpn_1x.py create mode 100644 configs/fast_mask_rcnn_r50_caffe_c4_1x.py create mode 100644 configs/fast_mask_rcnn_r50_fpn_1x.py create mode 100644 configs/fast_rcnn_r101_fpn_1x.py create mode 100644 configs/fast_rcnn_r50_caffe_c4_1x.py create mode 100644 configs/fast_rcnn_r50_fpn_1x.py create mode 100644 configs/faster_rcnn_ohem_r50_fpn_1x.py create mode 100644 configs/faster_rcnn_r101_fpn_1x.py create mode 100644 configs/faster_rcnn_r50_caffe_c4_1x.py create mode 100644 configs/faster_rcnn_r50_fpn_1x.py create mode 100644 configs/faster_rcnn_x101_32x4d_fpn_1x.py create mode 100644 configs/faster_rcnn_x101_64x4d_fpn_1x.py create mode 100644 configs/fcos/README.md create mode 100644 configs/fcos/fcos_mstrain_640_800_r101_caffe_fpn_gn_2x_4gpu.py create mode 100644 configs/fcos/fcos_mstrain_640_800_x101_64x4d_fpn_gn_2x.py create mode 100644 configs/fcos/fcos_r50_caffe_fpn_gn_1x_4gpu.py create mode 100644 configs/gn+ws/README.md create mode 100644 configs/gn+ws/faster_rcnn_r50_fpn_gn_ws_1x.py create mode 100644 configs/gn+ws/mask_rcnn_r50_fpn_gn_ws_20_23_24e.py create mode 100644 configs/gn+ws/mask_rcnn_r50_fpn_gn_ws_2x.py create mode 100644 configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws_2x.py create mode 100644 configs/gn/README.md create mode 100644 configs/gn/mask_rcnn_r101_fpn_gn_2x.py create mode 100644 configs/gn/mask_rcnn_r50_fpn_gn_2x.py create mode 100644 configs/gn/mask_rcnn_r50_fpn_gn_contrib_2x.py create mode 100644 configs/grid_rcnn_r50_fpn_2x.py create mode 100644 configs/htc/README.md create mode 100644 configs/htc/htc_dconv_c3-c5_mstrain_400_1400_x101_64x4d_fpn_20e.py create mode 100644 configs/htc/htc_r101_fpn_20e.py create mode 100644 configs/htc/htc_r50_fpn_1x.py create mode 100644 configs/htc/htc_r50_fpn_20e.py create mode 100644 configs/htc/htc_without_semantic_r50_fpn_1x.py create mode 100644 configs/htc/htc_x101_32x4d_fpn_20e_16gpu.py create mode 
100644 configs/htc/htc_x101_64x4d_fpn_20e_16gpu.py create mode 100644 configs/mask_rcnn_r101_fpn_1x.py create mode 100644 configs/mask_rcnn_r50_caffe_c4_1x.py create mode 100644 configs/mask_rcnn_r50_fpn_1x.py create mode 100644 configs/mask_rcnn_x101_32x4d_fpn_1x.py create mode 100644 configs/mask_rcnn_x101_64x4d_fpn_1x.py create mode 100644 configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py create mode 100644 configs/pascal_voc/ssd300_voc.py create mode 100644 configs/pascal_voc/ssd512_voc.py create mode 100644 configs/retinanet_r101_fpn_1x.py create mode 100644 configs/retinanet_r50_fpn_1x.py create mode 100644 configs/retinanet_x101_32x4d_fpn_1x.py create mode 100644 configs/retinanet_x101_64x4d_fpn_1x.py create mode 100644 configs/rpn_r101_fpn_1x.py create mode 100644 configs/rpn_r50_caffe_c4_1x.py create mode 100644 configs/rpn_r50_fpn_1x.py create mode 100644 configs/rpn_x101_32x4d_fpn_1x.py create mode 100644 configs/rpn_x101_64x4d_fpn_1x.py create mode 100644 configs/ssd300_coco.py create mode 100644 configs/ssd512_coco.py create mode 100644 demo/coco_test_12510.jpg create mode 100644 mmdet/__init__.py create mode 100644 mmdet/apis/__init__.py create mode 100644 mmdet/apis/env.py create mode 100644 mmdet/apis/inference.py create mode 100644 mmdet/apis/train.py create mode 100644 mmdet/core/__init__.py create mode 100644 mmdet/core/anchor/__init__.py create mode 100644 mmdet/core/anchor/anchor_generator.py create mode 100644 mmdet/core/anchor/anchor_target.py create mode 100644 mmdet/core/bbox/__init__.py create mode 100644 mmdet/core/bbox/assign_sampling.py create mode 100644 mmdet/core/bbox/assigners/__init__.py create mode 100644 mmdet/core/bbox/assigners/assign_result.py create mode 100644 mmdet/core/bbox/assigners/base_assigner.py create mode 100644 mmdet/core/bbox/assigners/max_iou_assigner.py create mode 100644 mmdet/core/bbox/bbox_target.py create mode 100644 mmdet/core/bbox/geometry.py create mode 100644 mmdet/core/bbox/samplers/__init__.py create mode 100644 mmdet/core/bbox/samplers/base_sampler.py create mode 100644 mmdet/core/bbox/samplers/combined_sampler.py create mode 100644 mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py create mode 100644 mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py create mode 100644 mmdet/core/bbox/samplers/ohem_sampler.py create mode 100644 mmdet/core/bbox/samplers/pseudo_sampler.py create mode 100644 mmdet/core/bbox/samplers/random_sampler.py create mode 100644 mmdet/core/bbox/samplers/sampling_result.py create mode 100644 mmdet/core/bbox/transforms.py create mode 100644 mmdet/core/evaluation/__init__.py create mode 100644 mmdet/core/evaluation/bbox_overlaps.py create mode 100644 mmdet/core/evaluation/class_names.py create mode 100644 mmdet/core/evaluation/coco_utils.py create mode 100644 mmdet/core/evaluation/eval_hooks.py create mode 100644 mmdet/core/evaluation/mean_ap.py create mode 100644 mmdet/core/evaluation/recall.py create mode 100644 mmdet/core/loss/__init__.py create mode 100644 mmdet/core/loss/losses.py create mode 100644 mmdet/core/mask/__init__.py create mode 100644 mmdet/core/mask/grid_target.py create mode 100644 mmdet/core/mask/mask_target.py create mode 100644 mmdet/core/mask/utils.py create mode 100644 mmdet/core/post_processing/__init__.py create mode 100644 mmdet/core/post_processing/bbox_nms.py create mode 100644 mmdet/core/post_processing/merge_augs.py create mode 100644 mmdet/core/utils/__init__.py create mode 100644 mmdet/core/utils/dist_utils.py create mode 100644 mmdet/core/utils/misc.py create 
mode 100644 mmdet/datasets/__init__.py create mode 100644 mmdet/datasets/coco.py create mode 100644 mmdet/datasets/concat_dataset.py create mode 100644 mmdet/datasets/custom.py create mode 100644 mmdet/datasets/extra_aug.py create mode 100644 mmdet/datasets/loader/__init__.py create mode 100644 mmdet/datasets/loader/build_loader.py create mode 100644 mmdet/datasets/loader/sampler.py create mode 100644 mmdet/datasets/repeat_dataset.py create mode 100644 mmdet/datasets/transforms.py create mode 100644 mmdet/datasets/utils.py create mode 100644 mmdet/datasets/voc.py create mode 100644 mmdet/datasets/xml_style.py create mode 100644 mmdet/models/__init__.py create mode 100644 mmdet/models/anchor_heads/__init__.py create mode 100644 mmdet/models/anchor_heads/anchor_head.py create mode 100644 mmdet/models/anchor_heads/fcos_head.py create mode 100644 mmdet/models/anchor_heads/retina_head.py create mode 100644 mmdet/models/anchor_heads/rpn_head.py create mode 100644 mmdet/models/anchor_heads/ssd_head.py create mode 100644 mmdet/models/backbones/__init__.py create mode 100644 mmdet/models/backbones/resnet.py create mode 100644 mmdet/models/backbones/resnext.py create mode 100644 mmdet/models/backbones/ssd_vgg.py create mode 100644 mmdet/models/bbox_heads/__init__.py create mode 100644 mmdet/models/bbox_heads/bbox_head.py create mode 100644 mmdet/models/bbox_heads/convfc_bbox_head.py create mode 100644 mmdet/models/builder.py create mode 100644 mmdet/models/detectors/__init__.py create mode 100644 mmdet/models/detectors/base.py create mode 100644 mmdet/models/detectors/cascade_rcnn.py create mode 100644 mmdet/models/detectors/fast_rcnn.py create mode 100644 mmdet/models/detectors/faster_rcnn.py create mode 100644 mmdet/models/detectors/fcos.py create mode 100644 mmdet/models/detectors/grid_rcnn.py create mode 100644 mmdet/models/detectors/htc.py create mode 100644 mmdet/models/detectors/mask_rcnn.py create mode 100644 mmdet/models/detectors/retinanet.py create mode 100644 mmdet/models/detectors/rpn.py create mode 100644 mmdet/models/detectors/single_stage.py create mode 100644 mmdet/models/detectors/test_mixins.py create mode 100644 mmdet/models/detectors/two_stage.py create mode 100644 mmdet/models/mask_heads/__init__.py create mode 100644 mmdet/models/mask_heads/fcn_mask_head.py create mode 100644 mmdet/models/mask_heads/fused_semantic_head.py create mode 100644 mmdet/models/mask_heads/grid_head.py create mode 100644 mmdet/models/mask_heads/htc_mask_head.py create mode 100644 mmdet/models/necks/__init__.py create mode 100644 mmdet/models/necks/fpn.py create mode 100644 mmdet/models/registry.py create mode 100644 mmdet/models/roi_extractors/__init__.py create mode 100644 mmdet/models/roi_extractors/single_level.py create mode 100644 mmdet/models/shared_heads/__init__.py create mode 100644 mmdet/models/shared_heads/res_layer.py create mode 100644 mmdet/models/utils/__init__.py create mode 100644 mmdet/models/utils/conv_module.py create mode 100644 mmdet/models/utils/conv_ws.py create mode 100644 mmdet/models/utils/norm.py create mode 100644 mmdet/models/utils/scale.py create mode 100644 mmdet/models/utils/weight_init.py create mode 100644 mmdet/ops/__init__.py create mode 100644 mmdet/ops/dcn/__init__.py create mode 100644 mmdet/ops/dcn/functions/__init__.py create mode 100644 mmdet/ops/dcn/functions/deform_conv.py create mode 100644 mmdet/ops/dcn/functions/deform_pool.py create mode 100644 mmdet/ops/dcn/modules/__init__.py create mode 100644 mmdet/ops/dcn/modules/deform_conv.py create mode 100644 
mmdet/ops/dcn/modules/deform_pool.py create mode 100644 mmdet/ops/dcn/setup.py create mode 100644 mmdet/ops/dcn/src/deform_conv_cuda.cpp create mode 100644 mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu create mode 100644 mmdet/ops/dcn/src/deform_pool_cuda.cpp create mode 100644 mmdet/ops/dcn/src/deform_pool_cuda_kernel.cu create mode 100644 mmdet/ops/nms/__init__.py create mode 100644 mmdet/ops/nms/nms_wrapper.py create mode 100644 mmdet/ops/nms/setup.py create mode 100644 mmdet/ops/nms/src/nms_cpu.cpp create mode 100644 mmdet/ops/nms/src/nms_cuda.cpp create mode 100644 mmdet/ops/nms/src/nms_kernel.cu create mode 100644 mmdet/ops/nms/src/soft_nms_cpu.pyx create mode 100644 mmdet/ops/roi_align/__init__.py create mode 100644 mmdet/ops/roi_align/functions/__init__.py create mode 100644 mmdet/ops/roi_align/functions/roi_align.py create mode 100644 mmdet/ops/roi_align/gradcheck.py create mode 100644 mmdet/ops/roi_align/modules/__init__.py create mode 100644 mmdet/ops/roi_align/modules/roi_align.py create mode 100644 mmdet/ops/roi_align/setup.py create mode 100644 mmdet/ops/roi_align/src/roi_align_cuda.cpp create mode 100644 mmdet/ops/roi_align/src/roi_align_kernel.cu create mode 100644 mmdet/ops/roi_pool/__init__.py create mode 100644 mmdet/ops/roi_pool/functions/__init__.py create mode 100644 mmdet/ops/roi_pool/functions/roi_pool.py create mode 100644 mmdet/ops/roi_pool/gradcheck.py create mode 100644 mmdet/ops/roi_pool/modules/__init__.py create mode 100644 mmdet/ops/roi_pool/modules/roi_pool.py create mode 100644 mmdet/ops/roi_pool/setup.py create mode 100644 mmdet/ops/roi_pool/src/roi_pool_cuda.cpp create mode 100644 mmdet/ops/roi_pool/src/roi_pool_kernel.cu create mode 100644 mmdet/ops/sigmoid_focal_loss/__init__.py create mode 100644 mmdet/ops/sigmoid_focal_loss/functions/__init__.py create mode 100644 mmdet/ops/sigmoid_focal_loss/functions/sigmoid_focal_loss.py create mode 100644 mmdet/ops/sigmoid_focal_loss/modules/__init__.py create mode 100644 mmdet/ops/sigmoid_focal_loss/modules/sigmoid_focal_loss.py create mode 100644 mmdet/ops/sigmoid_focal_loss/setup.py create mode 100644 mmdet/ops/sigmoid_focal_loss/src/sigmoid_focal_loss.cpp create mode 100644 mmdet/ops/sigmoid_focal_loss/src/sigmoid_focal_loss_cuda.cu create mode 100644 setup.py create mode 100644 tools/analyze_logs.py create mode 100644 tools/coco_eval.py create mode 100644 tools/convert_datasets/pascal_voc.py create mode 100755 tools/dist_test.sh create mode 100755 tools/dist_train.sh create mode 100644 tools/publish_model.py create mode 100755 tools/slurm_test.sh create mode 100755 tools/slurm_train.sh create mode 100644 tools/test.py create mode 100644 tools/train.py create mode 100644 tools/upgrade_model_version.py create mode 100644 tools/voc_eval.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..e7a290e --- /dev/null +++ b/.gitignore @@ -0,0 +1,111 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + +# cython generated cpp +mmdet/ops/nms/src/soft_nms_cpu.cpp +mmdet/version.py +data +.vscode +.idea diff --git a/GETTING_STARTED.md b/GETTING_STARTED.md new file mode 100644 index 0000000..b5aac9d --- /dev/null +++ b/GETTING_STARTED.md @@ -0,0 +1,256 @@ +# Getting Started + +This page provides basic tutorials about the usage of mmdetection. +For installation instructions, please see [INSTALL.md](INSTALL.md). + +## Inference with pretrained models + +We provide testing scripts to evaluate a whole dataset (COCO, PASCAL VOC, etc.), +and also some high-level APIs for easier integration into other projects. + +### Test a dataset + +- [x] single GPU testing +- [x] multiple GPU testing +- [x] visualize detection results + +You can use the following commands to test a dataset. + +```shell +# single-gpu testing +python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] [--show] + +# multi-gpu testing +./tools/dist_test.sh ${CONFIG_FILE} ${CHECKPOINT_FILE} ${GPU_NUM} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] +``` + +Optional arguments: +- `RESULT_FILE`: Filename of the output results in pickle format. If not specified, the results will not be saved to a file. +- `EVAL_METRICS`: Items to be evaluated on the results. Allowed values are: `proposal_fast`, `proposal`, `bbox`, `segm`, `keypoints`. +- `--show`: If specified, detection results will be plotted on the images and shown in a new window. Only applicable for single GPU testing. + +Examples: + +Assume that you have already downloaded the checkpoints to `checkpoints/`. + +1. Test Faster R-CNN and show the results. + +```shell +python tools/test.py configs/faster_rcnn_r50_fpn_1x.py \ + checkpoints/faster_rcnn_r50_fpn_1x_20181010-3d1b3351.pth \ + --show +``` + +2. Test Mask R-CNN and evaluate the bbox and mask AP. + +```shell +python tools/test.py configs/mask_rcnn_r50_fpn_1x.py \ + checkpoints/mask_rcnn_r50_fpn_1x_20181010-069fa190.pth \ + --out results.pkl --eval bbox segm +``` + +3. Test Mask R-CNN with 8 GPUs, and evaluate the bbox and mask AP. + +```shell +./tools/dist_test.sh configs/mask_rcnn_r50_fpn_1x.py \ + checkpoints/mask_rcnn_r50_fpn_1x_20181010-069fa190.pth \ + 8 --out results.pkl --eval bbox segm +``` + +### High-level APIs for testing images + +Here is an example of building the model and testing given images.
+ +```python +from mmdet.apis import init_detector, inference_detector, show_result + +config_file = 'configs/faster_rcnn_r50_fpn_1x.py' +checkpoint_file = 'checkpoints/faster_rcnn_r50_fpn_1x_20181010-3d1b3351.pth' + +# build the model from a config file and a checkpoint file +model = init_detector(config_file, checkpoint_file) + +# test a single image and show the results +img = 'test.jpg' # or img = mmcv.imread(img), which will only load it once +result = inference_detector(model, img) +show_result(img, result, model.CLASSES) + +# test a list of images and write the results to image files +imgs = ['test1.jpg', 'test2.jpg'] +for i, result in enumerate(inference_detector(model, imgs, device='cuda:0')): + show_result(imgs[i], result, model.CLASSES, out_file='result_{}.jpg'.format(i)) +``` + + +## Train a model + +mmdetection implements distributed training and non-distributed training, +which use `MMDistributedDataParallel` and `MMDataParallel` respectively. + +All outputs (log files and checkpoints) will be saved to the working directory, +which is specified by `work_dir` in the config file. + +**\*Important\***: The default learning rate in config files is for 8 GPUs. +If you use fewer or more than 8 GPUs, you need to set the learning rate proportional +to the number of GPUs, e.g., 0.01 for 4 GPUs and 0.04 for 16 GPUs. + +### Train with a single GPU + +```shell +python tools/train.py ${CONFIG_FILE} +``` + +If you want to specify the working directory in the command, you can add an argument `--work_dir ${YOUR_WORK_DIR}`. + +### Train with multiple GPUs + +```shell +./tools/dist_train.sh ${CONFIG_FILE} ${GPU_NUM} [optional arguments] +``` + +Optional arguments are: + +- `--validate` (recommended): Perform evaluation every k epochs (k=1 by default) during training. +- `--work_dir ${WORK_DIR}`: Override the working directory specified in the config file. +- `--resume_from ${CHECKPOINT_FILE}`: Resume from a previous checkpoint file. + +### Train with multiple machines + +If you run mmdetection on a cluster managed with [slurm](https://slurm.schedmd.com/), you can just use the script `slurm_train.sh`. + +```shell +./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} ${CONFIG_FILE} ${WORK_DIR} [${GPUS}] +``` + +Here is an example of using 16 GPUs to train Mask R-CNN on the dev partition. + +```shell +./tools/slurm_train.sh dev mask_r50_1x configs/mask_rcnn_r50_fpn_1x.py /nfs/xxxx/mask_rcnn_r50_fpn_1x 16 +``` + +You can check [slurm_train.sh](tools/slurm_train.sh) for full arguments and environment variables. + +If you just have multiple machines connected with Ethernet, you can refer to +the PyTorch [launch utility](https://pytorch.org/docs/stable/distributed_deprecated.html#launch-utility). +Training is usually slow if you do not have high-speed networking like InfiniBand. + + +## How-to + +### Use my own datasets + +The simplest way is to convert your dataset to existing dataset formats (COCO or PASCAL VOC). + +Here we show an example of adding a custom dataset of 5 classes, assuming it is also in COCO format. + +In `mmdet/datasets/my_dataset.py`: + +```python +from .coco import CocoDataset + + +class MyDataset(CocoDataset): + + CLASSES = ('a', 'b', 'c', 'd', 'e') +``` + +In `mmdet/datasets/__init__.py`: + +```python +from .my_dataset import MyDataset +``` + +Then you can use `MyDataset` in config files, with the same API as CocoDataset (a minimal config sketch is given below). + + +It is also fine if you do not want to convert the annotation format to COCO or PASCAL format.
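+
+Before turning to that alternative, here is a rough sketch of what the dataset part of a config could look like for the COCO-style `MyDataset` above. This is only an illustration modeled on the existing COCO configs (e.g. `configs/faster_rcnn_r50_fpn_1x.py`); the annotation file and image prefix are placeholder paths for your own data, and the other fields simply keep the defaults used in those configs.
+
+```python
+# Sketch of the dataset settings for the custom class registered above.
+# All paths are placeholders; adjust them to your own data layout.
+dataset_type = 'MyDataset'
+data_root = 'data/my_dataset/'
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+data = dict(
+    imgs_per_gpu=2,
+    workers_per_gpu=2,
+    train=dict(
+        type=dataset_type,
+        ann_file=data_root + 'annotations/train.json',
+        img_prefix=data_root + 'images/',
+        img_scale=(1333, 800),
+        img_norm_cfg=img_norm_cfg,
+        size_divisor=32,
+        flip_ratio=0.5,
+        with_mask=False,
+        with_crowd=True,
+        with_label=True))
+# The val/test entries follow the same pattern as the existing configs.
+```
+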
+Actually, we define a simple annotation format and all existing datasets are +processed to be compatible with it, either online or offline. + +The annotation of a dataset is a list of dicts; each dict corresponds to an image. +There are 3 fields, `filename` (relative path), `width` and `height`, for testing, +and an additional field `ann` for training. `ann` is also a dict containing at least 2 fields: +`bboxes` and `labels`, both of which are numpy arrays. Some datasets may provide +annotations like crowd/difficult/ignored bboxes; we use `bboxes_ignore` and `labels_ignore` +to cover them. + +Here is an example. +``` +[ + { + 'filename': 'a.jpg', + 'width': 1280, + 'height': 720, + 'ann': { + 'bboxes': (n, 4), + 'labels': (n, ), + 'bboxes_ignore': (k, 4), + 'labels_ignore': (k, ) (optional field) + } + }, + ... +] +``` + +There are two ways to work with custom datasets. + +- online conversion + + You can write a new Dataset class inherited from `CustomDataset`, and overwrite two methods + `load_annotations(self, ann_file)` and `get_ann_info(self, idx)`, + like [CocoDataset](mmdet/datasets/coco.py) and [VOCDataset](mmdet/datasets/voc.py). + +- offline conversion + + You can convert the annotation format to the expected format above and save it to + a pickle or json file, like [pascal_voc.py](tools/convert_datasets/pascal_voc.py). + Then you can simply use `CustomDataset`. + +### Develop new components + +We basically categorize model components into 4 types. + +- backbone: usually an FCN network to extract feature maps, e.g., ResNet, MobileNet. +- neck: the component between backbones and heads, e.g., FPN, PAFPN. +- head: the component for specific tasks, e.g., bbox prediction and mask prediction. +- roi extractor: the part for extracting RoI features from feature maps, e.g., RoI Align. + +Here we show how to develop new components with an example of MobileNet. + +1. Create a new file `mmdet/models/backbones/mobilenet.py`. + +```python +import torch.nn as nn + +from ..registry import BACKBONES + + +@BACKBONES.register_module +class MobileNet(nn.Module): + + def __init__(self, arg1, arg2): + pass + + def forward(self, x): # should return a tuple + pass +``` + +2. Import the module in `mmdet/models/backbones/__init__.py`. + +```python +from .mobilenet import MobileNet +``` + +3. Use it in your config file. + +```python +model = dict( + ... + backbone=dict( + type='MobileNet', + arg1=xxx, + arg2=xxx), + ... +``` + +For more information on how it works, you can refer to [TECHNICAL_DETAILS.md](TECHNICAL_DETAILS.md) (TODO). diff --git a/INSTALL.md b/INSTALL.md new file mode 100644 index 0000000..55b8b02 --- /dev/null +++ b/INSTALL.md @@ -0,0 +1,98 @@ +## Installation + +### Requirements + +- Linux +- Python 3.5+ ([Say goodbye to Python2](https://python3statement.org/)) +- PyTorch 1.0+ or PyTorch-nightly +- CUDA 9.0+ +- NCCL 2+ +- GCC 4.9+ +- [mmcv](https://github.com/open-mmlab/mmcv) + +We have tested the following versions of OS and software: + +- OS: Ubuntu 16.04/18.04 and CentOS 7.2 +- CUDA: 9.0/9.2/10.0 +- NCCL: 2.1.15/2.2.13/2.3.7/2.4.2 +- GCC: 4.9/5.3/5.4/7.3 + +### Install mmdetection + +a. Create a conda virtual environment and activate it. Then install Cython. + +```shell +conda create -n open-mmlab python=3.7 -y +source activate open-mmlab + +conda install cython +``` + +b. Install PyTorch stable or nightly and torchvision following the [official instructions](https://pytorch.org/). + +c. Clone the mmdetection repository.
+ +```shell +git clone https://github.com/open-mmlab/mmdetection.git +cd mmdetection +``` + +d. Compile CUDA extensions. + +```shell +./compile.sh +``` + +e. Install mmdetection (other dependencies will be installed automatically). + +```shell +python setup.py develop +# or "pip install -e ." +``` + +Note: + +1. It is recommended that you run step e each time you pull updates from GitHub. If the C/CUDA code has been updated, you also need to run step d. +The git commit id will be written to the version number with step e, e.g. 0.6.0+2e7045c. The version will also be saved in trained models. + +2. Following the above instructions, mmdetection is installed in `dev` mode; any modifications to the code will take effect without installing it again. + +### Prepare COCO dataset + +It is recommended to symlink the dataset root to `$MMDETECTION/data`. + +``` +mmdetection +├── mmdet +├── tools +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ ├── train2017 +│ │ ├── val2017 +│ │ ├── test2017 +│ ├── VOCdevkit +│ │ ├── VOC2007 +│ │ ├── VOC2012 + +``` + +### Scripts +[Here](https://gist.github.com/hellock/bf23cd7348c727d69d48682cb6909047) is +a script for setting up mmdetection with conda. + +### Notice +You can run `python(3) setup.py develop` or `pip install -e .` to install mmdetection if you want to make modifications to it frequently. + +If there is more than one mmdetection on your machine and you want to use them alternately, +please insert the following code into the main file +```python +import os.path as osp +import sys +sys.path.insert(0, osp.join(osp.dirname(osp.abspath(__file__)), '../')) +``` +or run the following command in the terminal of the corresponding folder. +```shell +export PYTHONPATH=`pwd`:$PYTHONPATH +``` diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types.
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/MODEL_ZOO.md b/MODEL_ZOO.md new file mode 100644 index 0000000..9ba86ab --- /dev/null +++ b/MODEL_ZOO.md @@ -0,0 +1,455 @@ +# Benchmark and Model Zoo + +## Environment + +### Hardware + +- 8 NVIDIA Tesla V100 GPUs +- Intel Xeon 4114 CPU @ 2.20GHz + +### Software environment + +- Python 3.6 / 3.7 +- PyTorch Nightly +- CUDA 9.0.176 +- CUDNN 7.0.4 +- NCCL 2.1.15 + +## Mirror sites + +We use AWS as the main site to host our model zoo, and maintain a mirror on aliyun. +You can replace `https://s3.ap-northeast-2.amazonaws.com/open-mmlab` with `https://open-mmlab.oss-cn-beijing.aliyuncs.com` in model urls. + +## Common settings + +- All FPN baselines and RPN-C4 baselines were trained using 8 GPU with a batch size of 16 (2 images per GPU). Other C4 baselines were trained using 8 GPU with a batch size of 8 (1 image per GPU). +- All models were trained on `coco_2017_train`, and tested on the `coco_2017_val`. +- We use distributed training and BN layer stats are fixed. 
+- We adopt the same training schedules as Detectron. 1x indicates 12 epochs and 2x indicates 24 epochs, which corresponds to slightly less iterations than Detectron and the difference can be ignored. +- All pytorch-style pretrained backbones on ImageNet are from PyTorch model zoo. +- For fair comparison with other codebases, we report the GPU memory as the maximum value of `torch.cuda.max_memory_allocated()` for all 8 GPUs. Note that this value is usually less than what `nvidia-smi` shows. +- We report the inference time as the overall time including data loading, network forwarding and post processing. + + +## Baselines + +More models with different backbones will be added to the model zoo. + +### RPN + +| Backbone | Style | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | AR1000 | Download | +| :-------------: | :-----: | :-----: | :------: | :-----------------: | :------------: | :----: | :------------------------------------------------------------------------------------------------------------------------: | +| R-50-C4 | caffe | 1x | - | - | 20.5 | 51.1 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/rpn_r50_caffe_c4_1x-ea7d3428.pth) | +| R-50-C4 | caffe | 2x | 2.2 | 0.17 | 20.3 | 52.2 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/rpn_r50_caffe_c4_2x-c6d5b958.pth) | +| R-50-C4 | pytorch | 1x | - | - | 20.1 | 50.2 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/rpn_r50_c4_1x-eb38972b.pth) | +| R-50-C4 | pytorch | 2x | - | - | 20.0 | 51.1 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/rpn_r50_c4_2x-3d4c1e14.pth) | +| R-50-FPN | caffe | 1x | 3.3 | 0.253 | 16.9 | 58.2 | - | +| R-50-FPN | pytorch | 1x | 3.5 | 0.276 | 17.7 | 57.1 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/rpn_r50_fpn_1x_20181010-4a9c0712.pth) | +| R-50-FPN | pytorch | 2x | - | - | - | 57.6 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/rpn_r50_fpn_2x_20181010-88a4a471.pth) | +| R-101-FPN | caffe | 1x | 5.2 | 0.379 | 13.9 | 59.4 | - | +| R-101-FPN | pytorch | 1x | 5.4 | 0.396 | 14.4 | 58.6 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/rpn_r101_fpn_1x_20181129-f50da4bd.pth) | +| R-101-FPN | pytorch | 2x | - | - | - | 59.1 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/rpn_r101_fpn_2x_20181129-e42c6c9a.pth) | +| X-101-32x4d-FPN | pytorch | 1x | 6.6 | 0.589 | 11.8 | 59.4 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/rpn_x101_32x4d_fpn_1x_20181218-7e379d26.pth) | +| X-101-32x4d-FPN | pytorch | 2x | - | - | - | 59.9 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/rpn_x101_32x4d_fpn_2x_20181218-0510af40.pth) | +| X-101-64x4d-FPN | pytorch | 1x | 9.5 | 0.955 | 8.3 | 59.8 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/rpn_x101_64x4d_fpn_1x_20181218-c1a24f1f.pth) | +| X-101-64x4d-FPN | pytorch | 2x | - | - | - | 60.0 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/rpn_x101_64x4d_fpn_2x_20181218-c22bdd70.pth) | + +### Faster R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Download | +| :-------------: | :-----: | :-----: | :------: | :-----------------: | :------------: | :----: | 
:--------------------------------------------------------------------------------------------------------------------------------: | +| R-50-C4 | caffe | 1x | - | - | 9.5 | 34.9 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/faster_rcnn_r50_caffe_c4_1x-75ecfdfa.pth) | +| R-50-C4 | caffe | 2x | 4.0 | 0.39 | 9.3 | 36.5 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/faster_rcnn_r50_caffe_c4_2x-71c67f27.pth) | +| R-50-C4 | pytorch | 1x | - | - | 9.3 | 33.9 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/faster_rcnn_r50_c4_1x-642cf91f.pth) | +| R-50-C4 | pytorch | 2x | - | - | 9.4 | 35.9 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/faster_rcnn_r50_c4_2x-6e4fdf4f.pth) | +| R-50-FPN | caffe | 1x | 3.6 | 0.333 | 13.5 | 36.6 | - | +| R-50-FPN | pytorch | 1x | 3.8 | 0.353 | 13.6 | 36.4 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/faster_rcnn_r50_fpn_1x_20181010-3d1b3351.pth) | +| R-50-FPN | pytorch | 2x | - | - | - | 37.7 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/faster_rcnn_r50_fpn_2x_20181010-443129e1.pth) | +| R-101-FPN | caffe | 1x | 5.5 | 0.465 | 11.5 | 38.8 | - | +| R-101-FPN | pytorch | 1x | 5.7 | 0.474 | 11.9 | 38.5 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/faster_rcnn_r101_fpn_1x_20181129-d1468807.pth) | +| R-101-FPN | pytorch | 2x | - | - | - | 39.4 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/faster_rcnn_r101_fpn_2x_20181129-73e7ade7.pth) | +| X-101-32x4d-FPN | pytorch | 1x | 6.9 | 0.672 | 10.3 | 40.1 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/faster_rcnn_x101_32x4d_fpn_1x_20181218-ad81c133.pth) | +| X-101-32x4d-FPN | pytorch | 2x | - | - | - | 40.4 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/faster_rcnn_x101_32x4d_fpn_2x_20181218-0ed58946.pth) | +| X-101-64x4d-FPN | pytorch | 1x | 9.8 | 1.040 | 7.3 | 41.3 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/faster_rcnn_x101_64x4d_fpn_1x_20181218-c9c69c8f.pth) | +| X-101-64x4d-FPN | pytorch | 2x | - | - | - | 40.7 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/faster_rcnn_x101_64x4d_fpn_2x_20181218-fe94f9b8.pth) | + +### Mask R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | mask AP | Download | +| :-------------: | :-----: | :-----: | :------: | :-----------------: | :------------: | :----: | :-----: | :------------------------------------------------------------------------------------------------------------------------------: | +| R-50-C4 | caffe | 1x | - | - | 8.1 | 35.9 | 31.5 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/mask_rcnn_r50_caffe_c4_1x-02a4ad3b.pth) | +| R-50-C4 | caffe | 2x | 4.2 | 0.43 | 8.1 | 37.9 | 32.9 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/mask_rcnn_r50_caffe_c4_2x-d150973a.pth) | +| R-50-C4 | pytorch | 1x | - | - | 7.9 | 35.1 | 31.2 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/mask_rcnn_r50_c4_1x-a83bdd40.pth) | +| R-50-C4 | pytorch | 2x | - | - | 8.0 | 37.2 | 32.5 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/mask_rcnn_r50_c4_2x-3cf169a9.pth) | +| R-50-FPN | caffe | 1x | 3.8 | 0.430 | 10.2 | 37.4 | 34.3 | - | +| 
R-50-FPN | pytorch | 1x | 3.9 | 0.453 | 10.6 | 37.3 | 34.2 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/mask_rcnn_r50_fpn_1x_20181010-069fa190.pth) | +| R-50-FPN | pytorch | 2x | - | - | - | 38.5 | 35.1 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/mask_rcnn_r50_fpn_2x_20181010-41d35c05.pth) | +| R-101-FPN | caffe | 1x | 5.7 | 0.534 | 9.4 | 39.9 | 36.1 | - | +| R-101-FPN | pytorch | 1x | 5.8 | 0.571 | 9.5 | 39.4 | 35.9 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/mask_rcnn_r101_fpn_1x_20181129-34ad1961.pth) | +| R-101-FPN | pytorch | 2x | - | - | - | 40.3 | 36.5 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/mask_rcnn_r101_fpn_2x_20181129-a254bdfc.pth) | +| X-101-32x4d-FPN | pytorch | 1x | 7.1 | 0.759 | 8.3 | 41.1 | 37.1 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/mask_rcnn_x101_32x4d_fpn_1x_20181218-44e635cc.pth) | +| X-101-32x4d-FPN | pytorch | 2x | - | - | - | 41.4 | 37.1 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/mask_rcnn_x101_32x4d_fpn_2x_20181218-f023dffa.pth) | +| X-101-64x4d-FPN | pytorch | 1x | 10.0 | 1.102 | 6.5 | 42.1 | 38.0 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/mask_rcnn_x101_64x4d_fpn_1x_20181218-cb159987.pth) | +| X-101-64x4d-FPN | pytorch | 2x | - | - | - | 42.0 | 37.7 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/mask_rcnn_x101_64x4d_fpn_2x_20181218-ea936e44.pth) | + +### Fast R-CNN (with pre-computed proposals) + +| Backbone | Style | Type | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | mask AP | Download | +| :-------: | :-----: | :----: | :-----: | :------: | :-----------------: | :------------: | :----: | :-----: | :-----------------------------------------------------------------------------------------------------------------------------: | +| R-50-C4 | caffe | Faster | 1x | - | - | 6.7 | 35.0 | - | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/fast_rcnn_r50_caffe_c4_1x-0ef9a60b.pth) | +| R-50-C4 | caffe | Faster | 2x | 3.8 | 0.34 | 6.6 | 36.4 | - | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/fast_rcnn_r50_c4_2x-657a9fc6.pth) | +| R-50-C4 | pytorch | Faster | 1x | - | - | 6.3 | 34.2 | - | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/fast_rcnn_r50_c4_1x-2bc00ca9.pth) | +| R-50-C4 | pytorch | Faster | 2x | - | - | 6.1 | 35.8 | - | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/fast_rcnn_r50_caffe_c4_2x-9171d0fc.pth) | +| R-50-FPN | caffe | Faster | 1x | 3.3 | 0.242 | 18.4 | 36.6 | - | - | +| R-50-FPN | pytorch | Faster | 1x | 3.5 | 0.250 | 16.5 | 35.8 | - | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/fast_rcnn_r50_fpn_1x_20181010-08160859.pth) | +| R-50-C4 | caffe | Mask | 1x | - | - | 8.1 | 35.9 | 31.5 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/fast_mask_rcnn_r50_caffe_c4_1x-b43f7f3c.pth) | +| R-50-C4 | caffe | Mask | 2x | 4.2 | 0.43 | 8.1 | 37.9 | 32.9 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/fast_mask_rcnn_r50_caffe_c4_2x-e3580184.pth) | +| R-50-C4 | pytorch | Mask | 1x | - | - | 7.9 | 35.1 | 31.2 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/fast_mask_rcnn_r50_c4_1x-bc7fa8c8.pth) | +| R-50-C4 
| pytorch | Mask | 2x | - | - | 8.0 | 37.2 | 32.5 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/fast_mask_rcnn_r50_fpn_2x_20181010-5048cb03.pth) | +| R-50-FPN | pytorch | Faster | 2x | - | - | - | 37.1 | - | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/fast_rcnn_r50_fpn_2x_20181010-d263ada5.pth) | +| R-101-FPN | caffe | Faster | 1x | 5.2 | 0.355 | 14.4 | 38.6 | - | - | +| R-101-FPN | pytorch | Faster | 1x | 5.4 | 0.388 | 13.2 | 38.1 | - | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/fast_rcnn_r101_fpn_1x_20181129-ffaa2eb0.pth) | +| R-101-FPN | pytorch | Faster | 2x | - | - | - | 38.8 | - | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/fast_rcnn_r101_fpn_2x_20181129-9dba92ce.pth) | +| R-50-FPN | caffe | Mask | 1x | 3.4 | 0.328 | 12.8 | 37.3 | 34.5 | - | +| R-50-FPN | pytorch | Mask | 1x | 3.5 | 0.346 | 12.7 | 36.8 | 34.1 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/fast_mask_rcnn_r50_fpn_1x_20181010-e030a38f.pth) | +| R-50-FPN | pytorch | Mask | 2x | - | - | - | 37.9 | 34.8 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/fast_mask_rcnn_r50_fpn_2x_20181010-5048cb03.pth) | +| R-101-FPN | caffe | Mask | 1x | 5.2 | 0.429 | 11.2 | 39.4 | 36.1 | - | +| R-101-FPN | pytorch | Mask | 1x | 5.4 | 0.462 | 10.9 | 38.9 | 35.8 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/fast_mask_rcnn_r101_fpn_1x_20181129-2273fa9b.pth) | +| R-101-FPN | pytorch | Mask | 2x | - | - | - | 39.9 | 36.4 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/fast_mask_rcnn_r101_fpn_2x_20181129-bf63ec5e.pth) | + +### RetinaNet + +| Backbone | Style | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Download | +| :-------------: | :-----: | :-----: | :------: | :-----------------: | :------------: | :----: | :------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | caffe | 1x | 3.4 | 0.285 | 12.5 | 35.8 | - | +| R-50-FPN | pytorch | 1x | 3.6 | 0.308 | 12.1 | 35.6 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/retinanet_r50_fpn_1x_20181125-7b0c2548.pth) | +| R-50-FPN | pytorch | 2x | - | - | - | 36.5 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/retinanet_r50_fpn_2x_20181125-8b724df2.pth) | +| R-101-FPN | caffe | 1x | 5.3 | 0.410 | 10.4 | 37.8 | - | +| R-101-FPN | pytorch | 1x | 5.5 | 0.429 | 10.9 | 37.7 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/retinanet_r101_fpn_1x_20181129-f016f384.pth) | +| R-101-FPN | pytorch | 2x | - | - | - | 38.1 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/retinanet_r101_fpn_2x_20181129-72c14526.pth) | +| X-101-32x4d-FPN | pytorch | 1x | 6.7 | 0.632 | 9.3 | 39.0 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/retinanet_x101_32x4d_fpn_1x_20190501-967812ba.pth) | +| X-101-32x4d-FPN | pytorch | 2x | - | - | - | 39.3 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/retinanet_x101_32x4d_fpn_2x_20181218-8596452d.pth) | +| X-101-64x4d-FPN | pytorch | 1x | 9.6 | 0.993 | 7.0 | 40.0 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/retinanet_x101_64x4d_fpn_1x_20181218-a0a22662.pth) | +| X-101-64x4d-FPN | pytorch | 2x | - | - | 
- | 39.6 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/retinanet_x101_64x4d_fpn_2x_20181218-5e88d045.pth) | + +### Cascade R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Download | +| :-------------: | :-----: | :-----: | :------: | :-----------------: | :------------: | :----: | :---------------------------------------------------------------------------------------------------------------------------------: | +| R-50-C4 | caffe | 1x | 8.7 | 0.92 | 5.0 | 38.7 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/cascade_rcnn_r50_caffe_c4_1x-7c85c62b.pth) | +| R-50-FPN | caffe | 1x | 3.9 | 0.464 | 10.9 | 40.5 | - | +| R-50-FPN | pytorch | 1x | 4.1 | 0.455 | 11.9 | 40.4 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/cascade_rcnn_r50_fpn_1x_20190501-3b6211ab.pth) | +| R-50-FPN | pytorch | 20e | - | - | - | 41.1 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/cascade_rcnn_r50_fpn_20e_20181123-db483a09.pth) | +| R-101-FPN | caffe | 1x | 5.8 | 0.569 | 9.6 | 42.4 | - | +| R-101-FPN | pytorch | 1x | 6.0 | 0.584 | 10.3 | 42.0 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/cascade_rcnn_r101_fpn_1x_20181129-d64ebac7.pth) | +| R-101-FPN | pytorch | 20e | - | - | - | 42.5 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/cascade_rcnn_r101_fpn_20e_20181129-b46dcede.pth) | +| X-101-32x4d-FPN | pytorch | 1x | 7.2 | 0.770 | 8.9 | 43.6 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/cascade_rcnn_x101_32x4d_fpn_1x_20190501-af628be5.pth) | +| X-101-32x4d-FPN | pytorch | 20e | - | - | - | 44.0 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/cascade_rcnn_x101_32x4d_fpn_2x_20181218-28f73c4c.pth) | +| X-101-64x4d-FPN | pytorch | 1x | 10.0 | 1.133 | 6.7 | 44.5 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/cascade_rcnn_x101_64x4d_fpn_1x_20181218-e2dc376a.pth) | +| X-101-64x4d-FPN | pytorch | 20e | - | - | - | 44.7 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/cascade_rcnn_x101_64x4d_fpn_2x_20181218-5add321e.pth) | + +### Cascade Mask R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | mask AP | Download | +| :-------------: | :-----: | :-----: | :------: | :-----------------: | :------------: | :----: | :-----: | :---------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-C4 | caffe | 1x | 9.1 | 0.99 | 4.5 | 39.3 | 32.8 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/cascade_mask_rcnn_r50_caffe_c4_1x-f72cc254.pth) | +| R-50-FPN | caffe | 1x | 5.1 | 0.692 | 7.6 | 40.9 | 35.5 | - | +| R-50-FPN | pytorch | 1x | 5.3 | 0.683 | 7.4 | 41.2 | 35.7 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/cascade_mask_rcnn_r50_fpn_1x_20181123-88b170c9.pth) | +| R-50-FPN | pytorch | 20e | - | - | - | 42.3 | 36.6 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/cascade_mask_rcnn_r50_fpn_20e_20181123-6e0c9713.pth) | +| R-101-FPN | caffe | 1x | 7.0 | 0.803 | 7.2 | 43.1 | 37.2 | - | +| R-101-FPN | pytorch | 1x | 7.2 | 0.807 | 6.8 | 42.6 | 37.0 | 
[model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/cascade_mask_rcnn_r101_fpn_1x_20181129-64f00602.pth) | +| R-101-FPN | pytorch | 20e | - | - | - | 43.3 | 37.6 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/cascade_mask_rcnn_r101_fpn_20e_20181129-cb85151d.pth) | +| X-101-32x4d-FPN | pytorch | 1x | 8.4 | 0.976 | 6.6 | 44.4 | 38.2 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/cascade_mask_rcnn_x101_32x4d_fpn_1x_20181218-1d944c89.pth) | +| X-101-32x4d-FPN | pytorch | 20e | - | - | - | 44.7 | 38.6 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/cascade_mask_rcnn_x101_32x4d_fpn_20e_20181218-761a3473.pth) | +| X-101-64x4d-FPN | pytorch | 1x | 11.4 | 1.33 | 5.3 | 45.4 | 39.1 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/cascade_mask_rcnn_x101_64x4d_fpn_1x_20190501-827e0a70.pth) | +| X-101-64x4d-FPN | pytorch | 20e | - | - | - | 45.7 | 39.4 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/cascade_mask_rcnn_x101_64x4d_fpn_20e_20181218-630773a7.pth) | + +**Notes:** + +- The `20e` schedule in Cascade (Mask) R-CNN indicates decreasing the lr at 16 and 19 epochs, with a total of 20 epochs. + +### Hybrid Task Cascade (HTC) + +| Backbone | Style | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | mask AP | Download | +| :-------------: | :-----: | :-----: | :------: | :-----------------: | :------------: | :----: | :-----: | :-----------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | pytorch | 1x | 7.4 | 0.936 | 4.1 | 42.1 | 37.3 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/htc/htc_r50_fpn_1x_20190408-878c1712.pth) | +| R-50-FPN | pytorch | 20e | - | - | - | 43.2 | 38.1 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/htc/htc_r50_fpn_20e_20190408-c03b7015.pth) | +| R-101-FPN | pytorch | 20e | 9.3 | 1.051 | 4.0 | 44.9 | 39.4 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/htc/htc_r101_fpn_20e_20190408-a2e586db.pth) | +| X-101-32x4d-FPN | pytorch | 20e | 5.8 | 0.769 | 3.8 | 46.1 | 40.3 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/htc/htc_x101_32x4d_fpn_20e_20190408-9eae4d0b.pth) | +| X-101-64x4d-FPN | pytorch | 20e | 7.5 | 1.120 | 3.5 | 46.9 | 40.8 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/htc/htc_x101_64x4d_fpn_20e_20190408-497f2561.pth) | + +**Notes:** + +- Please refer to [Hybrid Task Cascade](configs/htc/README.md) for details and more a powerful model (50.7/43.9). 
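+For reference, the `20e` schedule described in the notes above maps directly onto the config conventions used throughout `configs/` in this patch. The sketch below is illustrative rather than a copy of a shipped config: it only changes the `step` and `total_epochs` fields of a `1x` config to what the note describes.
+
+```python
+# The `1x` configs in this patch use step=[8, 11] with total_epochs = 12.
+# The `20e` schedule decays the lr after epochs 16 and 19 and trains 20 epochs.
+lr_config = dict(
+    policy='step',
+    warmup='linear',
+    warmup_iters=500,
+    warmup_ratio=1.0 / 3,
+    step=[16, 19])   # decrease the lr at epoch 16 and epoch 19
+total_epochs = 20    # 20 epochs in total
+```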
+ +### SSD + +| Backbone | Size | Style | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Download | +| :------: | :---: | :---: | :-----: | :------: | :-----------------: | :------------: | :----: | :-------------------------------------------------------------------------------------------------------------------------------: | +| VGG16 | 300 | caffe | 120e | 3.5 | 0.256 | 25.9 / 34.6 | 25.7 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/ssd300_coco_vgg16_caffe_120e_20181221-84d7110b.pth) | +| VGG16 | 512 | caffe | 120e | 7.6 | 0.412 | 20.7 / 25.4 | 29.3 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/ssd512_coco_vgg16_caffe_120e_20181221-d48b0be8.pth) | + +### SSD (PASCAL VOC) + +| Backbone | Size | Style | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Download | +| :------: | :---: | :---: | :-----: | :------: | :-----------------: | :------------: | :----: | :------------------------------------------------------------------------------------------------------------------------------: | +| VGG16 | 300 | caffe | 240e | 2.5 | 0.159 | 35.7 / 53.6 | 77.5 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/ssd300_voc_vgg16_caffe_240e_20190501-7160d09a.pth) | +| VGG16 | 512 | caffe | 240e | 4.3 | 0.214 | 27.5 / 35.9 | 80.0 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/ssd512_voc_vgg16_caffe_240e_20190501-ff194be1.pth) | + +**Notes:** + +- `cudnn.benchmark` is set as `True` for SSD training and testing. +- Inference time is reported for batch size = 1 and batch size = 8. +- The speed difference between VOC and COCO is caused by model parameters and nms. + +### Group Normalization (GN) + +Please refer to [Group Normalization](configs/gn/README.md) for details. + +### Weight Standardization + +Please refer to [Weight Standardization](configs/gn+ws/README.md) for details. + +### Deformable Convolution v2 + +Please refer to [Deformable Convolutional Networks](configs/dcn/README.md) for details. + + +## Comparison with Detectron and maskrcnn-benchmark + +We compare mmdetection with [Detectron](https://github.com/facebookresearch/Detectron) +and [maskrcnn-benchmark](https://github.com/facebookresearch/maskrcnn-benchmark). The backbone used is R-50-FPN. + +In general, mmdetection has 3 advantages over Detectron. + +- **Higher performance** (especially in terms of mask AP) +- **Faster training speed** +- **Memory efficient** + +### Performance + +Detectron and maskrcnn-benchmark use caffe-style ResNet as the backbone. +We report results using both caffe-style (weights converted from +[here](https://github.com/facebookresearch/Detectron/blob/master/MODEL_ZOO.md#imagenet-pretrained-models)) +and pytorch-style (weights from the official model zoo) ResNet backbone, +indicated as *pytorch-style results* / *caffe-style results*. + +We find that pytorch-style ResNet usually converges slower than caffe-style ResNet, +thus leading to slightly lower results in 1x schedule, but the final results +of 2x schedule is higher. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| Type                | Lr schd | Detectron   | maskrcnn-benchmark | mmdetection                |
+| :-----------------: | :-----: | :---------: | :----------------: | :------------------------: |
+| RPN                 | 1x      | 57.2        | -                  | 57.1 / 58.2                |
+| RPN                 | 2x      | -           | -                  | 57.6 / -                   |
+| Faster R-CNN        | 1x      | 36.7        | 36.8               | 36.4 / 36.6                |
+| Faster R-CNN        | 2x      | 37.9        | -                  | 37.7 / -                   |
+| Mask R-CNN          | 1x      | 37.7 & 33.9 | 37.8 & 34.2        | 37.3 & 34.2 / 37.4 & 34.3  |
+| Mask R-CNN          | 2x      | 38.6 & 34.5 | -                  | 38.5 & 35.1 / -            |
+| Fast R-CNN          | 1x      | 36.4        | -                  | 35.8 / 36.6                |
+| Fast R-CNN          | 2x      | 36.8        | -                  | 37.1 / -                   |
+| Fast R-CNN (w/mask) | 1x      | 37.3 & 33.7 | -                  | 36.8 & 34.1 / 37.3 & 34.5  |
+| Fast R-CNN (w/mask) | 2x      | 37.7 & 34.0 | -                  | 37.9 & 34.8 / -            |
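+The *pytorch-style* / *caffe-style* split above comes down to a few backbone and normalization fields in the configs. The sketch below contrasts the two, reusing only field names and values that appear in the configs later in this patch; the caffe-style FPN combination itself is not shipped here, so treat it as illustrative.
+
+```python
+# pytorch-style ResNet-50: torchvision weights, BN trained as usual,
+# RGB input normalization (cf. the *_r50_fpn_1x configs in this patch).
+pytorch_backbone = dict(
+    pretrained='modelzoo://resnet50',
+    backbone=dict(type='ResNet', depth=50, num_stages=4,
+                  out_indices=(0, 1, 2, 3), frozen_stages=1, style='pytorch'))
+pytorch_img_norm = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+# caffe-style ResNet-50: converted Detectron weights, frozen BN, BGR input
+# (cf. the *_caffe_* configs in this patch; shown here with an FPN-style
+# 4-stage layout for comparison, which is an illustrative combination).
+caffe_backbone = dict(
+    pretrained='open-mmlab://resnet50_caffe',
+    backbone=dict(type='ResNet', depth=50, num_stages=4,
+                  out_indices=(0, 1, 2, 3), frozen_stages=1,
+                  norm_cfg=dict(type='BN', requires_grad=False),
+                  norm_eval=True, style='caffe'))
+caffe_img_norm = dict(
+    mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
+```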
+
+### Training Speed
+
+The training speed is measured in s/iter. The lower, the better.
+
+| Type                | Detectron (P100) \*1 | maskrcnn-benchmark (V100) | mmdetection (V100) \*2 |
+| :-----------------: | :------------------: | :-----------------------: | :--------------------: |
+| RPN                 | 0.416                | -                         | 0.253                  |
+| Faster R-CNN        | 0.544                | 0.353                     | 0.333                  |
+| Mask R-CNN          | 0.889                | 0.454                     | 0.430                  |
+| Fast R-CNN          | 0.285                | -                         | 0.242                  |
+| Fast R-CNN (w/mask) | 0.377                | -                         | 0.328                  |
+
+\*1. Facebook's Big Basin servers (P100/V100) are slightly faster than the servers we use. mmdetection can also run slightly faster on FB's servers.
+
+\*2. For a fair comparison, we list the caffe-style results here.
+
+
+### Inference Speed
+
+The inference speed is measured in fps (img/s) on a single GPU. The higher, the better.
+
+| Type                | Detectron (P100) | maskrcnn-benchmark (V100) | mmdetection (V100) |
+| :-----------------: | :--------------: | :-----------------------: | :----------------: |
+| RPN                 | 12.5             | -                         | 16.9               |
+| Faster R-CNN        | 10.3             | 7.9                       | 13.5               |
+| Mask R-CNN          | 8.5              | 7.7                       | 10.2               |
+| Fast R-CNN          | 12.5             | -                         | 18.4               |
+| Fast R-CNN (w/mask) | 9.9              | -                         | 12.8               |
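+The fps numbers above are single-image, single-GPU throughput. A rough sketch of such a measurement is given below; it assumes the `init_detector`/`inference_detector` helpers in `mmdet/apis`, and the checkpoint and image paths are placeholders, so adapt it to the actual helper signatures and files in this codebase.
+
+```python
+# Rough single-GPU inference timing sketch (not the script used for the table).
+import time
+import torch
+from mmdet.apis import init_detector, inference_detector  # assumed helpers
+
+config_file = 'configs/cascade_rcnn_r50_fpn_1x.py'
+checkpoint_file = 'work_dirs/cascade_rcnn_r50_fpn_1x/latest.pth'  # hypothetical path
+img = 'test.jpg'                                                  # placeholder image
+
+model = init_detector(config_file, checkpoint_file, device='cuda:0')
+
+for _ in range(10):                 # warm up before timing
+    inference_detector(model, img)
+torch.cuda.synchronize()
+
+n_iters = 100
+start = time.perf_counter()
+for _ in range(n_iters):
+    inference_detector(model, img)
+torch.cuda.synchronize()
+print('fps: {:.1f}'.format(n_iters / (time.perf_counter() - start)))
+```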
+
+### Training memory (GB)
+
+| Type                | Detectron | maskrcnn-benchmark | mmdetection |
+| :-----------------: | :-------: | :----------------: | :---------: |
+| RPN                 | 6.4       | -                  | 3.3         |
+| Faster R-CNN        | 7.2       | 4.4                | 3.6         |
+| Mask R-CNN          | 8.6       | 5.2                | 3.8         |
+| Fast R-CNN          | 6.0       | -                  | 3.3         |
+| Fast R-CNN (w/mask) | 7.9       | -                  | 3.4         |
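+The figures above are framework-reported peak allocations rather than `nvidia-smi` readings (see the note below the table). A minimal sketch of reading out such a number with the standard `torch.cuda` counters:
+
+```python
+# Minimal sketch: query PyTorch's own memory counters after some training
+# iterations. nvidia-smi reports a larger number because it also includes the
+# CUDA context and blocks held by the caching allocator.
+import torch
+
+def log_gpu_memory(device=0):
+    allocated = torch.cuda.max_memory_allocated(device) / 1024**3  # GB actually used by tensors
+    cached = torch.cuda.max_memory_cached(device) / 1024**3        # GB held by the caching allocator
+    print('max allocated: {:.2f} GB, max cached: {:.2f} GB'.format(allocated, cached))
+
+# e.g. call log_gpu_memory() at the end of an epoch or after a few hundred iterations
+```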
+
+There is no doubt that maskrcnn-benchmark and mmdetection are more memory efficient than Detectron,
+and much of the credit goes to PyTorch itself. We also perform some memory optimizations to push it further.
+
+Note that Caffe2 and PyTorch provide different APIs (with different implementations) for obtaining memory usage.
+For all codebases, `nvidia-smi` shows a larger memory usage than the numbers reported in the table above.
+
+
+
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..a230819
--- /dev/null
+++ b/README.md
@@ -0,0 +1,111 @@
+# Grid R-CNN
+
+An mmdetection-based implementation of the updated Grid R-CNN published at CVPR 2019. The original paper is [here](https://arxiv.org/abs/1811.12030); details of the updates can be found in this [technical report](Grid_RCNN_Plus.pdf).
+
+## Installation
+
+This project is based on the mmdetection object detection framework.
+
+### Requirements
+
+- Python 3.5+ ([Say goodbye to Python2](https://python3statement.org/))
+- PyTorch 1.0+ or PyTorch-nightly
+- CUDA 9.0+
+- GCC 4.9+
+- [mmcv](https://github.com/open-mmlab/mmcv)
+
+### Install Grid R-CNN with mmdetection
+
+a. Create a conda virtual environment and activate it. Then install Cython.
+
+```shell
+conda create -n open-mmlab python=3.7 -y
+source activate open-mmlab
+
+conda install cython
+```
+
+b. Install PyTorch (stable or nightly) and torchvision following the [official instructions](https://pytorch.org/).
+
+c. Clone this repository.
+
+d. Compile the CUDA extensions.
+
+```shell
+./compile.sh
+```
+
+e. Set up mmdetection (other dependencies will be installed automatically).
+
+```shell
+python setup.py develop
+# or "pip install -e ."
+```
+
+### Dataset Preparation
+
+It is recommended to symlink the dataset root to your project.
+
+```
+Grid_RCNN
+├── mmdet
+├── tools
+├── configs
+├── data
+│   ├── coco
+│   │   ├── annotations
+│   │   ├── train2017
+│   │   ├── val2017
+│   │   ├── test2017
+│   ├── VOCdevkit
+│   │   ├── VOC2007
+│   │   ├── VOC2012
+
+```
+
+## Training and Testing
+
+We provide training and testing scripts and config files for Grid R-CNN. An example configuration file is [here](configs/grid_rcnn_r50_fpn_2x.py), and the corresponding checkpoint is [here](https://drive.google.com/file/d/1RCtNjb_JruBtl6sCq5w_XXN1e0ksn_f6/view?usp=sharing). Note that the batch size we used is 64 (32 GPUs, 2 images per GPU); you may need to adjust the learning rate and warmup schedule according to your batch size.
+
+Please see [GETTING_STARTED.md](GETTING_STARTED.md) for the basic usage of mmdetection.
+
+## Results
+
+Models are trained on ResNet-50/101 FPN backbones and tested on COCO minival. Inference speed is measured on our TITAN Xp GPU cluster.
+
+Method | lr sched | AP | AP@0.5 | AP@0.75 | AP@S | AP@M | AP@L | Inference speed
+:--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--:
+Res50-FPN | 2x | 37.4% | 59.3% | 40.3% | 21.8% | 40.9% | 47.9% | 0.09s
+Res50-FPN-Grid R-CNN | 2x | 40.4% | 58.6% | 43.7% | 23.2% | 44.2% | 52.4% | 0.11s
+Res101-FPN | 2x | 39.5% | 61.2% | 43.1% | 22.7% | 43.7% | 50.8% | 0.12s
+Res101-FPN-Grid R-CNN | 2x | 42.0% | 60.6% | 45.4% | 24.1% | 46.2% | 55.2% | 0.13s
+
+
+## Citation
+
+If you use our codebase or models in your research, please cite this project.
+ + +``` +@InProceedings{Lu_2019_CVPR, +author = {Lu, Xin and Li, Buyu and Yue, Yuxin and Li, Quanquan and Yan, Junjie}, +title = {Grid R-CNN}, +booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, +month = {June}, +year = {2019} +} + +@misc{mmdetection2018, + author = {Kai Chen and Jiangmiao Pang and Jiaqi Wang and Yu Xiong and Xiaoxiao Li + and Shuyang Sun and Wansen Feng and Ziwei Liu and Jianping Shi and + Wanli Ouyang and Chen Change Loy and Dahua Lin}, + title = {mmdetection}, + howpublished = {\url{https://github.com/open-mmlab/mmdetection}}, + year = {2018} +} +``` + +## License +Grid R-CNN is released under the [Apache 2.0 license](https://github.com/STVIR/pysot/blob/master/LICENSE). + + diff --git a/TECHNICAL_DETAILS.md b/TECHNICAL_DETAILS.md new file mode 100644 index 0000000..85f1854 --- /dev/null +++ b/TECHNICAL_DETAILS.md @@ -0,0 +1,99 @@ +## Overview + +In this section, we will introduce the main units of training a detector: +data loading, model and iteration pipeline. + +## Data loading + +Following typical conventions, we use `Dataset` and `DataLoader` for data loading +with multiple workers. `Dataset` returns a dict of data items corresponding +the arguments of models' forward method. +Since the data in object detection may not be the same size (image size, gt bbox size, etc.), +we introduce a new `DataContainer` type in `mmcv` to help collect and distribute +data of different size. +See [here](https://github.com/open-mmlab/mmcv/blob/master/mmcv/parallel/data_container.py) for more details. + +## Model + +In mmdetection, model components are basically categorized as 4 types. + +- backbone: usually a FCN network to extract feature maps, e.g., ResNet. +- neck: the part between backbones and heads, e.g., FPN, ASPP. +- head: the part for specific tasks, e.g., bbox prediction and mask prediction. +- roi extractor: the part for extracting features from feature maps, e.g., RoI Align. + +We also write implement some general detection pipelines with the above components, +such as `SingleStageDetector` and `TwoStageDetector`. + +### Build a model with basic components + +Following some basic pipelines (e.g., two-stage detectors), the model structure +can be customized through config files with no pains. + +If we want to implement some new components, e.g, the path aggregation +FPN structure in [Path Aggregation Network for Instance Segmentation](https://arxiv.org/abs/1803.01534), there are two things to do. + +1. create a new file in `mmdet/models/necks/pafpn.py`. + + ```python + class PAFPN(nn.Module): + + def __init__(self, + in_channels, + out_channels, + num_outs, + start_level=0, + end_level=-1, + add_extra_convs=False): + pass + + def forward(self, inputs): + # implementation is ignored + pass + ``` + +2. modify the config file from + + ```python + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5) + ``` + + to + + ```python + neck=dict( + type='PAFPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5) + ``` + +We will release more components (backbones, necks, heads) for research purpose. + +### Write a new model + +To write a new detection pipeline, you need to inherit from `BaseDetector`, +which defines the following abstract methods. + +- `extract_feat()`: given an image batch of shape (n, c, h, w), extract the feature map(s). 
+- `forward_train()`: forward method of the training mode +- `simple_test()`: single scale testing without augmentation +- `aug_test()`: testing with augmentation (multi-scale, flip, etc.) + +[TwoStageDetector](https://github.com/hellock/mmdetection/blob/master/mmdet/models/detectors/two_stage.py) +is a good example which shows how to do that. + +## Iteration pipeline + +We adopt distributed training for both single machine and multiple machines. +Supposing that the server has 8 GPUs, 8 processes will be started and each process runs on a single GPU. + +Each process keeps an isolated model, data loader, and optimizer. +Model parameters are only synchronized once at the begining. +After a forward and backward pass, gradients will be allreduced among all GPUs, +and the optimizer will update model parameters. +Since the gradients are allreduced, the model parameter stays the same for all processes after the iteration. diff --git a/compile.sh b/compile.sh new file mode 100755 index 0000000..335cf51 --- /dev/null +++ b/compile.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash + +PYTHON=${PYTHON:-"python"} + +echo "Building roi align op..." +cd mmdet/ops/roi_align +if [ -d "build" ]; then + rm -r build +fi +$PYTHON setup.py build_ext --inplace + +echo "Building roi pool op..." +cd ../roi_pool +if [ -d "build" ]; then + rm -r build +fi +$PYTHON setup.py build_ext --inplace + +echo "Building nms op..." +cd ../nms +if [ -d "build" ]; then + rm -r build +fi +$PYTHON setup.py build_ext --inplace + +echo "Building dcn..." +cd ../dcn +if [ -d "build" ]; then + rm -r build +fi +$PYTHON setup.py build_ext --inplace + +echo "Building sigmoid focal loss op..." +cd ../sigmoid_focal_loss +if [ -d "build" ]; then + rm -r build +fi +$PYTHON setup.py build_ext --inplace diff --git a/configs/cascade_mask_rcnn_r101_fpn_1x.py b/configs/cascade_mask_rcnn_r101_fpn_1x.py new file mode 100644 index 0000000..9915c2e --- /dev/null +++ b/configs/cascade_mask_rcnn_r101_fpn_1x.py @@ -0,0 +1,234 @@ +# model settings +model = dict( + type='CascadeRCNN', + num_stages=3, + pretrained='modelzoo://resnet101', + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067], + reg_class_agnostic=True) + ], + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', 
out_size=14, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=81)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False) + ], + stage_loss_weights=[1, 0.5, 0.25]) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100, + mask_thr_binary=0.5), + keep_all_stages=False) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=True, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') 
+log_level = 'INFO' +work_dir = './work_dirs/cascade_mask_rcnn_r101_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/cascade_mask_rcnn_r50_caffe_c4_1x.py b/configs/cascade_mask_rcnn_r50_caffe_c4_1x.py new file mode 100644 index 0000000..985945a --- /dev/null +++ b/configs/cascade_mask_rcnn_r50_caffe_c4_1x.py @@ -0,0 +1,236 @@ +# model settings +norm_cfg = dict(type='BN', requires_grad=False) +model = dict( + type='CascadeRCNN', + num_stages=3, + pretrained='open-mmlab://resnet50_caffe', + backbone=dict( + type='ResNet', + depth=50, + num_stages=3, + strides=(1, 2, 2), + dilations=(1, 1, 1), + out_indices=(2, ), + frozen_stages=1, + norm_cfg=norm_cfg, + norm_eval=True, + style='caffe'), + shared_head=dict( + type='ResLayer', + depth=50, + stage=3, + stride=2, + dilation=1, + style='caffe', + norm_cfg=norm_cfg, + norm_eval=True), + rpn_head=dict( + type='RPNHead', + in_channels=1024, + feat_channels=1024, + anchor_scales=[2, 4, 8, 16, 32], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[16], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=1024, + featmap_strides=[16]), + bbox_head=[ + dict( + type='BBoxHead', + with_avg_pool=True, + roi_feat_size=7, + in_channels=2048, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=True), + dict( + type='BBoxHead', + with_avg_pool=True, + roi_feat_size=7, + in_channels=2048, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1], + reg_class_agnostic=True), + dict( + type='BBoxHead', + with_avg_pool=True, + roi_feat_size=7, + in_channels=2048, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067], + reg_class_agnostic=True) + ], + mask_roi_extractor=None, + mask_head=dict( + type='FCNMaskHead', + num_convs=0, + in_channels=2048, + conv_out_channels=256, + num_classes=81)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=12000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=14, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=14, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=14, + pos_weight=-1, + debug=False) + ], + stage_loss_weights=[1, 0.5, 0.25]) 
+test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=6000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100, + mask_thr_binary=0.5), + keep_all_stages=False) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False) +data = dict( + imgs_per_gpu=1, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=True, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/cascade_mask_rcnn_r50_caffe_c4_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/cascade_mask_rcnn_r50_fpn_1x.py b/configs/cascade_mask_rcnn_r50_fpn_1x.py new file mode 100644 index 0000000..7d89be3 --- /dev/null +++ b/configs/cascade_mask_rcnn_r50_fpn_1x.py @@ -0,0 +1,234 @@ +# model settings +model = dict( + type='CascadeRCNN', + num_stages=3, + pretrained='modelzoo://resnet50', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + 
in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067], + reg_class_agnostic=True) + ], + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=81)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False) + ], + stage_loss_weights=[1, 0.5, 0.25]) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100, + mask_thr_binary=0.5), + keep_all_stages=False) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=True, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + 
step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/cascade_mask_rcnn_r50_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/cascade_mask_rcnn_x101_32x4d_fpn_1x.py b/configs/cascade_mask_rcnn_x101_32x4d_fpn_1x.py new file mode 100644 index 0000000..7377432 --- /dev/null +++ b/configs/cascade_mask_rcnn_x101_32x4d_fpn_1x.py @@ -0,0 +1,236 @@ +# model settings +model = dict( + type='CascadeRCNN', + num_stages=3, + pretrained='open-mmlab://resnext101_32x4d', + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067], + reg_class_agnostic=True) + ], + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=81)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + 
pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False) + ], + stage_loss_weights=[1, 0.5, 0.25]) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100, + mask_thr_binary=0.5), + keep_all_stages=False) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=True, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/cascade_mask_rcnn_r50_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/cascade_mask_rcnn_x101_64x4d_fpn_1x.py b/configs/cascade_mask_rcnn_x101_64x4d_fpn_1x.py new file mode 100644 index 0000000..d71351e --- /dev/null +++ b/configs/cascade_mask_rcnn_x101_64x4d_fpn_1x.py @@ -0,0 +1,236 @@ +# model settings +model = dict( + type='CascadeRCNN', + num_stages=3, + pretrained='open-mmlab://resnext101_64x4d', + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + 
fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067], + reg_class_agnostic=True) + ], + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=81)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False) + ], + stage_loss_weights=[1, 0.5, 0.25]) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100, + mask_thr_binary=0.5), + keep_all_stages=False) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=True, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 
'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/cascade_mask_rcnn_r50_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/cascade_rcnn_r101_fpn_1x.py b/configs/cascade_rcnn_r101_fpn_1x.py new file mode 100644 index 0000000..0c11598 --- /dev/null +++ b/configs/cascade_rcnn_r101_fpn_1x.py @@ -0,0 +1,217 @@ +# model settings +model = dict( + type='CascadeRCNN', + num_stages=3, + pretrained='modelzoo://resnet101', + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067], + reg_class_agnostic=True) + ]) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, 
+ add_gt_as_proposals=True), + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False) + ], + stage_loss_weights=[1, 0.5, 0.25]) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100), + keep_all_stages=False) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=False, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/cascade_rcnn_r101_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/cascade_rcnn_r50_caffe_c4_1x.py b/configs/cascade_rcnn_r50_caffe_c4_1x.py new file mode 100644 index 0000000..8224f2f --- /dev/null +++ b/configs/cascade_rcnn_r50_caffe_c4_1x.py @@ -0,0 +1,226 @@ +# model settings +norm_cfg = dict(type='BN', requires_grad=False) +model = dict( + type='CascadeRCNN', + num_stages=3, + pretrained='open-mmlab://resnet50_caffe', + backbone=dict( + type='ResNet', + depth=50, + num_stages=3, + strides=(1, 2, 2), + dilations=(1, 1, 1), + out_indices=(2, ), + frozen_stages=1, + norm_cfg=norm_cfg, + norm_eval=True, + style='caffe'), + shared_head=dict( + type='ResLayer', + depth=50, + stage=3, + stride=2, + dilation=1, + style='caffe', + norm_cfg=norm_cfg, + norm_eval=True), + rpn_head=dict( + type='RPNHead', + in_channels=1024, + feat_channels=1024, + anchor_scales=[2, 4, 8, 16, 32], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[16], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=1024, + featmap_strides=[16]), + 
bbox_head=[ + dict( + type='BBoxHead', + with_avg_pool=True, + roi_feat_size=7, + in_channels=2048, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=True), + dict( + type='BBoxHead', + with_avg_pool=True, + roi_feat_size=7, + in_channels=2048, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1], + reg_class_agnostic=True), + dict( + type='BBoxHead', + with_avg_pool=True, + roi_feat_size=7, + in_channels=2048, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067], + reg_class_agnostic=True) + ]) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=12000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=14, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=14, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=14, + pos_weight=-1, + debug=False) + ], + stage_loss_weights=[1, 0.5, 0.25]) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=6000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100), + keep_all_stages=False) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False) +data = dict( + imgs_per_gpu=1, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=True, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, 
norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/cascade_rcnn_r50_c4_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/cascade_rcnn_r50_fpn_1x.py b/configs/cascade_rcnn_r50_fpn_1x.py new file mode 100644 index 0000000..de0ca00 --- /dev/null +++ b/configs/cascade_rcnn_r50_fpn_1x.py @@ -0,0 +1,217 @@ +# model settings +model = dict( + type='CascadeRCNN', + num_stages=3, + pretrained='modelzoo://resnet50', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067], + reg_class_agnostic=True) + ]) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), 
+ pos_weight=-1, + debug=False) + ], + stage_loss_weights=[1, 0.5, 0.25]) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100), + keep_all_stages=False) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=False, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/cascade_rcnn_r50_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/cascade_rcnn_x101_32x4d_fpn_1x.py b/configs/cascade_rcnn_x101_32x4d_fpn_1x.py new file mode 100644 index 0000000..ba7c890 --- /dev/null +++ b/configs/cascade_rcnn_x101_32x4d_fpn_1x.py @@ -0,0 +1,219 @@ +# model settings +model = dict( + type='CascadeRCNN', + num_stages=3, + pretrained='open-mmlab://resnext101_32x4d', + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1], + 
reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067], + reg_class_agnostic=True) + ]) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False) + ], + stage_loss_weights=[1, 0.5, 0.25]) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100), + keep_all_stages=False) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=False, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level 
= 'INFO' +work_dir = './work_dirs/cascade_rcnn_r50_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/cascade_rcnn_x101_64x4d_fpn_1x.py b/configs/cascade_rcnn_x101_64x4d_fpn_1x.py new file mode 100644 index 0000000..4203956 --- /dev/null +++ b/configs/cascade_rcnn_x101_64x4d_fpn_1x.py @@ -0,0 +1,219 @@ +# model settings +model = dict( + type='CascadeRCNN', + num_stages=3, + pretrained='open-mmlab://resnext101_64x4d', + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067], + reg_class_agnostic=True) + ]) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False) + ], + stage_loss_weights=[1, 0.5, 0.25]) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100), + keep_all_stages=False) +# dataset settings +dataset_type = 
'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=False, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/cascade_rcnn_r50_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/dcn/README.md b/configs/dcn/README.md new file mode 100644 index 0000000..d95ee40 --- /dev/null +++ b/configs/dcn/README.md @@ -0,0 +1,43 @@ +# Deformable Convolutional Networks + +# Introduction + +``` +@inproceedings{dai2017deformable, + title={Deformable Convolutional Networks}, + author={Dai, Jifeng and Qi, Haozhi and Xiong, Yuwen and Li, Yi and Zhang, Guodong and Hu, Han and Wei, Yichen}, + booktitle={Proceedings of the IEEE international conference on computer vision}, + year={2017} +} + +@article{zhu2018deformable, + title={Deformable ConvNets v2: More Deformable, Better Results}, + author={Zhu, Xizhou and Hu, Han and Lin, Stephen and Dai, Jifeng}, + journal={arXiv preprint arXiv:1811.11168}, + year={2018} +} +``` + +## Results and Models + +| Backbone | Model | Style | Conv | Pool | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | mask AP | Download | +|:---------:|:------------:|:-------:|:-------------:|:------:|:-------:|:--------:|:-------------------:|:--------------:|:------:|:-------:|:--------:| +| R-50-FPN | Faster | pytorch | dconv(c3-c5) | - | 1x | 3.9 | 0.594 | 10.2 | 40.0 | - | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/dcn/faster_rcnn_dconv_c3-c5_r50_fpn_1x_20190125-e41688c9.pth) | +| R-50-FPN | Faster | pytorch | mdconv(c3-c5) | - | 1x | 3.7 | 0.598 | 10.0 | 40.2 | - | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/dcn/faster_rcnn_mdconv_c3-c5_r50_fpn_1x_20190125-1b768045.pth) | +| R-50-FPN | Faster | pytorch | - | dpool | 1x | 4.6 | 0.714 | 8.7 | 37.8 | - | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/dcn/faster_rcnn_dpool_r50_fpn_1x_20190125-f4fc1d70.pth) | +| R-50-FPN | Faster | pytorch | - | mdpool | 1x | 5.2 | 0.769 | 8.2 | 38.0 | - | 
[model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/dcn/faster_rcnn_mdpool_r50_fpn_1x_20190125-473d0f3d.pth) | +| R-101-FPN | Faster | pytorch | dconv(c3-c5) | - | 1x | 5.8 | 0.811 | 8.0 | 42.1 | - | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/dcn/faster_rcnn_dconv_c3-c5_r101_fpn_1x_20190125-a7e31b65.pth) | +| X-101-32x4d-FPN | Faster | pytorch | dconv(c3-c5) | - | 1x | 7.1 | 1.126 | 6.6 | 43.4 | - | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/dcn/faster_rcnn_dconv_c3-c5_x101_32x4d_fpn_1x_20190201-6d46376f.pth) | +| R-50-FPN | Mask | pytorch | dconv(c3-c5) | - | 1x | 4.5 | 0.712 | 7.7 | 41.1 | 37.2 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/dcn/mask_rcnn_dconv_c3-c5_r50_fpn_1x_20190125-4f94ff79.pth) | +| R-50-FPN | Mask | pytorch | mdconv(c3-c5) | - | 1x | 4.5 | 0.712 | 7.7 | 41.3 | 37.3 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/dcn/mask_rcnn_mdconv_c3-c5_r50_fpn_1x_20190125-c5601dc3.pth) | +| R-101-FPN | Mask | pytorch | dconv(c3-c5) | - | 1x | 6.4 | 0.939 | 6.5 | 43.2 | 38.7 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/dcn/mask_rcnn_dconv_c3-c5_r101_fpn_1x_20190125-decb6db5.pth) | +| R-50-FPN | Cascade | pytorch | dconv(c3-c5) | - | 1x | 4.4 | 0.660 | 7.6 | 44.0 | - | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/dcn/cascade_rcnn_dconv_c3-c5_r50_fpn_1x_20190125-dfa53166.pth) | +| R-101-FPN | Cascade | pytorch | dconv(c3-c5) | - | 1x | 6.3 | 0.881 | 6.8 | 45.0 | - | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/dcn/cascade_rcnn_dconv_c3-c5_r101_fpn_1x_20190125-aaa877cc.pth) | +| R-50-FPN | Cascade Mask | pytorch | dconv(c3-c5) | - | 1x | 6.6 | 0.942 | 5.7 | 44.4 | 38.3 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/dcn/cascade_mask_rcnn_dconv_c3-c5_r50_fpn_1x_20190125-09d8a443.pth) | +| R-101-FPN | Cascade Mask | pytorch | dconv(c3-c5) | - | 1x | 8.5 | 1.156 | 5.1 | 45.7 | 39.4 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/dcn/cascade_mask_rcnn_dconv_c3-c5_r101_fpn_1x_20190125-0d62c190.pth) | + +**Notes:** + +- `dconv` and `mdconv` denote deformable and modulated deformable convolution, respectively; `c3-c5` means that dconv is added in ResNet stages 3 to 5. `dpool` and `mdpool` denote deformable and modulated deformable RoI pooling, respectively. +- The DCN ops are modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch; the modified version should be more memory-efficient and slightly faster.
+- **Memory, Train/Inf time is outdated.** \ No newline at end of file diff --git a/configs/dcn/cascade_mask_rcnn_dconv_c3-c5_r50_fpn_1x.py b/configs/dcn/cascade_mask_rcnn_dconv_c3-c5_r50_fpn_1x.py new file mode 100644 index 0000000..711231e --- /dev/null +++ b/configs/dcn/cascade_mask_rcnn_dconv_c3-c5_r50_fpn_1x.py @@ -0,0 +1,237 @@ +# model settings +model = dict( + type='CascadeRCNN', + num_stages=3, + pretrained='modelzoo://resnet50', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch', + dcn=dict( + modulated=False, deformable_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True)), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067], + reg_class_agnostic=True) + ], + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=81)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + 
neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False) + ], + stage_loss_weights=[1, 0.5, 0.25]) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100, + mask_thr_binary=0.5), + keep_all_stages=False) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=True, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/cascade_mask_rcnn_dconv_c3-c5_r50_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/dcn/cascade_rcnn_dconv_c3-c5_r50_fpn_1x.py b/configs/dcn/cascade_rcnn_dconv_c3-c5_r50_fpn_1x.py new file mode 100644 index 0000000..cc00d91 --- /dev/null +++ b/configs/dcn/cascade_rcnn_dconv_c3-c5_r50_fpn_1x.py @@ -0,0 +1,220 @@ +# model settings +model = dict( + type='CascadeRCNN', + num_stages=3, + pretrained='modelzoo://resnet50', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch', + dcn=dict( + modulated=False, deformable_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True)), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + 
reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067], + reg_class_agnostic=True) + ]) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False) + ], + stage_loss_weights=[1, 0.5, 0.25]) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100), + keep_all_stages=False) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=False, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# 
yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/cascade_rcnn_dconv_c3-c5_r50_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/dcn/faster_rcnn_dconv_c3-c5_r50_fpn_1x.py b/configs/dcn/faster_rcnn_dconv_c3-c5_r50_fpn_1x.py new file mode 100644 index 0000000..5e47eae --- /dev/null +++ b/configs/dcn/faster_rcnn_dconv_c3-c5_r50_fpn_1x.py @@ -0,0 +1,166 @@ +# model settings +model = dict( + type='FasterRCNN', + pretrained='modelzoo://resnet50', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch', + dcn=dict( + modulated=False, deformable_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True)), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100) + # soft-nms is also supported for rcnn testing + # e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05) +) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=False, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + 
img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/faster_rcnn_dconv_c3-c5_r50_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/dcn/faster_rcnn_dconv_c3-c5_x101_32x4d_fpn_1x.py b/configs/dcn/faster_rcnn_dconv_c3-c5_x101_32x4d_fpn_1x.py new file mode 100644 index 0000000..aab40b9 --- /dev/null +++ b/configs/dcn/faster_rcnn_dconv_c3-c5_x101_32x4d_fpn_1x.py @@ -0,0 +1,171 @@ +# model settings +model = dict( + type='FasterRCNN', + pretrained='open-mmlab://resnext101_32x4d', + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch', + dcn=dict( + modulated=False, + groups=32, + deformable_groups=1, + fallback_on_stride=False), + stage_with_dcn=(False, True, True, True)), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), 
max_per_img=100) + # soft-nms is also supported for rcnn testing + # e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05) +) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=False, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/faster_rcnn_dconv_c3-c5_x101_32x4d_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/dcn/faster_rcnn_dpool_r50_fpn_1x.py b/configs/dcn/faster_rcnn_dpool_r50_fpn_1x.py new file mode 100644 index 0000000..9d94e57 --- /dev/null +++ b/configs/dcn/faster_rcnn_dpool_r50_fpn_1x.py @@ -0,0 +1,169 @@ +# model settings +model = dict( + type='FasterRCNN', + pretrained='modelzoo://resnet50', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict( + type='DeformRoIPoolingPack', + out_size=7, + out_channels=256, + no_trans=False, + group_size=1, + trans_std=0.1), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + 
smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100) + # soft-nms is also supported for rcnn testing + # e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05) +) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=False, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/faster_rcnn_dpool_r50_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/dcn/faster_rcnn_mdconv_c3-c5_r50_fpn_1x.py b/configs/dcn/faster_rcnn_mdconv_c3-c5_r50_fpn_1x.py new file mode 100644 index 0000000..73dc579 --- /dev/null +++ b/configs/dcn/faster_rcnn_mdconv_c3-c5_r50_fpn_1x.py @@ -0,0 +1,166 @@ +# model settings +model = dict( + type='FasterRCNN', + pretrained='modelzoo://resnet50', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch', + dcn=dict( + modulated=True, deformable_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True)), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + 
type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100) + # soft-nms is also supported for rcnn testing + # e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05) +) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=False, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/faster_rcnn_mdconv_c3-c5_r50_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/dcn/faster_rcnn_mdpool_r50_fpn_1x.py b/configs/dcn/faster_rcnn_mdpool_r50_fpn_1x.py new file mode 100644 index 0000000..b55a34c --- /dev/null +++ b/configs/dcn/faster_rcnn_mdpool_r50_fpn_1x.py @@ -0,0 +1,169 @@ +# model settings +model = 
dict( + type='FasterRCNN', + pretrained='modelzoo://resnet50', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict( + type='ModulatedDeformRoIPoolingPack', + out_size=7, + out_channels=256, + no_trans=False, + group_size=1, + trans_std=0.1), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100) + # soft-nms is also supported for rcnn testing + # e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05) +) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=False, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) 
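+# The "1x" schedule: 500 iterations of linear warmup, then the learning rate
+# drops at epochs 8 and 11 over the 12 total training epochs set below.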
+checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/faster_rcnn_mdpool_r50_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/dcn/mask_rcnn_dconv_c3-c5_r50_fpn_1x.py b/configs/dcn/mask_rcnn_dconv_c3-c5_r50_fpn_1x.py new file mode 100644 index 0000000..8135f09 --- /dev/null +++ b/configs/dcn/mask_rcnn_dconv_c3-c5_r50_fpn_1x.py @@ -0,0 +1,178 @@ +# model settings +model = dict( + type='MaskRCNN', + pretrained='modelzoo://resnet50', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch', + dcn=dict( + modulated=False, deformable_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True)), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=81)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False)) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100, + mask_thr_binary=0.5)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, 
+ flip_ratio=0.5, + with_mask=True, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/mask_rcnn_dconv_c3-c5_r50_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/fast_mask_rcnn_r101_fpn_1x.py b/configs/fast_mask_rcnn_r101_fpn_1x.py new file mode 100644 index 0000000..fa64d6f --- /dev/null +++ b/configs/fast_mask_rcnn_r101_fpn_1x.py @@ -0,0 +1,137 @@ +# model settings +model = dict( + type='FastRCNN', + pretrained='modelzoo://resnet101', + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=81)) +# model training and testing settings +train_cfg = dict( + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False)) +test_cfg = dict( + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100, + mask_thr_binary=0.5)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_train2017.pkl', + 
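+        # FastRCNN has no RPN head, so the train/val/test splits read proposals
+        # precomputed by a separately trained RPN (rpn_r50_fpn_1x) from the
+        # .pkl files given via proposal_file.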
flip_ratio=0.5, + with_mask=True, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_val2017.pkl', + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_val2017.pkl', + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/fast_mask_rcnn_r101_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/fast_mask_rcnn_r50_caffe_c4_1x.py b/configs/fast_mask_rcnn_r50_caffe_c4_1x.py new file mode 100644 index 0000000..1aa97e0 --- /dev/null +++ b/configs/fast_mask_rcnn_r50_caffe_c4_1x.py @@ -0,0 +1,141 @@ +# model settings +norm_cfg = dict(type='BN', requires_grad=False) +model = dict( + type='FastRCNN', + pretrained='open-mmlab://resnet50_caffe', + backbone=dict( + type='ResNet', + depth=50, + num_stages=3, + strides=(1, 2, 2), + dilations=(1, 1, 1), + out_indices=(2, ), + frozen_stages=1, + norm_cfg=norm_cfg, + norm_eval=True, + style='caffe'), + shared_head=dict( + type='ResLayer', + depth=50, + stage=3, + stride=2, + dilation=1, + style='caffe', + norm_cfg=norm_cfg, + norm_eval=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=1024, + featmap_strides=[16]), + bbox_head=dict( + type='BBoxHead', + with_avg_pool=True, + roi_feat_size=7, + in_channels=2048, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False), + mask_roi_extractor=None, + mask_head=dict( + type='FCNMaskHead', + num_convs=0, + in_channels=2048, + conv_out_channels=256, + num_classes=81)) +# model training and testing settings +train_cfg = dict( + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=14, + pos_weight=-1, + debug=False)) +test_cfg = dict( + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100, + mask_thr_binary=0.5)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False) +data = dict( + imgs_per_gpu=1, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + 
img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + proposal_file=data_root + 'proposals/rpn_r50_c4_1x_train2017.pkl', + flip_ratio=0.5, + with_mask=True, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + proposal_file=data_root + 'proposals/rpn_r50_c4_1x_val2017.pkl', + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + proposal_file=data_root + 'proposals/rpn_r50_c4_1x_val2017.pkl', + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/fast_mask_rcnn_r50_caffe_c4_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/fast_mask_rcnn_r50_fpn_1x.py b/configs/fast_mask_rcnn_r50_fpn_1x.py new file mode 100644 index 0000000..2005100 --- /dev/null +++ b/configs/fast_mask_rcnn_r50_fpn_1x.py @@ -0,0 +1,137 @@ +# model settings +model = dict( + type='FastRCNN', + pretrained='modelzoo://resnet50', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=81)) +# model training and testing settings +train_cfg = dict( + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False)) +test_cfg = dict( + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100, + mask_thr_binary=0.5)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + 
type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_train2017.pkl', + flip_ratio=0.5, + with_mask=True, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_val2017.pkl', + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_val2017.pkl', + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/fast_mask_rcnn_r50_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/fast_rcnn_r101_fpn_1x.py b/configs/fast_rcnn_r101_fpn_1x.py new file mode 100644 index 0000000..c61b74f --- /dev/null +++ b/configs/fast_rcnn_r101_fpn_1x.py @@ -0,0 +1,122 @@ +# model settings +model = dict( + type='FastRCNN', + pretrained='modelzoo://resnet101', + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False)) +# model training and testing settings +train_cfg = dict( + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)) +test_cfg = dict( + rcnn=dict( + score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + proposal_file=data_root + 
'proposals/rpn_r50_fpn_1x_train2017.pkl', + flip_ratio=0.5, + with_mask=False, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_val2017.pkl', + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_val2017.pkl', + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/fast_rcnn_r101_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/fast_rcnn_r50_caffe_c4_1x.py b/configs/fast_rcnn_r50_caffe_c4_1x.py new file mode 100644 index 0000000..20481f3 --- /dev/null +++ b/configs/fast_rcnn_r50_caffe_c4_1x.py @@ -0,0 +1,130 @@ +# model settings +norm_cfg = dict(type='BN', requires_grad=False) +model = dict( + type='FastRCNN', + pretrained='open-mmlab://resnet50_caffe', + backbone=dict( + type='ResNet', + depth=50, + num_stages=3, + strides=(1, 2, 2), + dilations=(1, 1, 1), + out_indices=(2, ), + frozen_stages=1, + norm_cfg=norm_cfg, + norm_eval=True, + style='caffe'), + shared_head=dict( + type='ResLayer', + depth=50, + stage=3, + stride=2, + dilation=1, + style='caffe', + norm_cfg=norm_cfg, + norm_eval=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=1024, + featmap_strides=[16]), + bbox_head=dict( + type='BBoxHead', + with_avg_pool=True, + roi_feat_size=7, + in_channels=2048, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False)) +# model training and testing settings +train_cfg = dict( + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)) +test_cfg = dict( + rcnn=dict( + score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False) +data = dict( + imgs_per_gpu=1, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + proposal_file=data_root + 'proposals/rpn_r50_c4_1x_train2017.pkl', + flip_ratio=0.5, + 
with_mask=False, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + proposal_file=data_root + 'proposals/rpn_r50_c4_1x_val2017.pkl', + flip_ratio=0, + with_mask=False, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + proposal_file=data_root + 'proposals/rpn_r50_c4_1x_val2017.pkl', + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/fast_rcnn_r50_caffe_c4_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/fast_rcnn_r50_fpn_1x.py b/configs/fast_rcnn_r50_fpn_1x.py new file mode 100644 index 0000000..542e2dd --- /dev/null +++ b/configs/fast_rcnn_r50_fpn_1x.py @@ -0,0 +1,122 @@ +# model settings +model = dict( + type='FastRCNN', + pretrained='modelzoo://resnet50', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False)) +# model training and testing settings +train_cfg = dict( + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)) +test_cfg = dict( + rcnn=dict( + score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_train2017.pkl', + flip_ratio=0.5, + with_mask=False, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + 
img_norm_cfg=img_norm_cfg, + proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_val2017.pkl', + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_val2017.pkl', + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/fast_rcnn_r50_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/faster_rcnn_ohem_r50_fpn_1x.py b/configs/faster_rcnn_ohem_r50_fpn_1x.py new file mode 100644 index 0000000..9311c5f --- /dev/null +++ b/configs/faster_rcnn_ohem_r50_fpn_1x.py @@ -0,0 +1,163 @@ +# model settings +model = dict( + type='FasterRCNN', + pretrained='modelzoo://resnet50', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='OHEMSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100) + # soft-nms is also supported for rcnn testing + # e.g., nms=dict(type='soft_nms', iou_thr=0.5, 
min_score=0.05) +) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=False, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/faster_rcnn_r50_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/faster_rcnn_r101_fpn_1x.py b/configs/faster_rcnn_r101_fpn_1x.py new file mode 100644 index 0000000..90a3aad --- /dev/null +++ b/configs/faster_rcnn_r101_fpn_1x.py @@ -0,0 +1,163 @@ +# model settings +model = dict( + type='FasterRCNN', + pretrained='modelzoo://resnet101', + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + 
pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100) + # soft-nms is also supported for rcnn testing + # e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05) +) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=False, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/faster_rcnn_r101_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/faster_rcnn_r50_caffe_c4_1x.py b/configs/faster_rcnn_r50_caffe_c4_1x.py new file mode 100644 index 0000000..24b1f0d --- /dev/null +++ b/configs/faster_rcnn_r50_caffe_c4_1x.py @@ -0,0 +1,168 @@ +# model settings +norm_cfg = dict(type='BN', requires_grad=False) +model = dict( + type='FasterRCNN', + pretrained='open-mmlab://resnet50_caffe', + backbone=dict( + type='ResNet', + depth=50, + num_stages=3, + strides=(1, 2, 2), + dilations=(1, 1, 1), + out_indices=(2, ), + frozen_stages=1, + norm_cfg=norm_cfg, + norm_eval=True, + style='caffe'), + shared_head=dict( + type='ResLayer', + depth=50, + stage=3, + stride=2, + dilation=1, + style='caffe', + norm_cfg=norm_cfg, + norm_eval=True), + rpn_head=dict( + type='RPNHead', + in_channels=1024, + feat_channels=1024, + anchor_scales=[2, 4, 8, 16, 32], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[16], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=1024, + featmap_strides=[16]), + bbox_head=dict( + type='BBoxHead', + with_avg_pool=True, + roi_feat_size=7, + 
in_channels=2048, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=12000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=6000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False) +data = dict( + imgs_per_gpu=1, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=False, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/faster_rcnn_r50_caffe_c4_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/faster_rcnn_r50_fpn_1x.py b/configs/faster_rcnn_r50_fpn_1x.py new file mode 100644 index 0000000..bcfcd15 --- /dev/null +++ b/configs/faster_rcnn_r50_fpn_1x.py @@ -0,0 +1,163 @@ +# model settings +model = dict( + type='FasterRCNN', + pretrained='modelzoo://resnet50', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + 
anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100) + # soft-nms is also supported for rcnn testing + # e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05) +) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=False, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/faster_rcnn_r50_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/faster_rcnn_x101_32x4d_fpn_1x.py 
b/configs/faster_rcnn_x101_32x4d_fpn_1x.py new file mode 100644 index 0000000..cdeee64 --- /dev/null +++ b/configs/faster_rcnn_x101_32x4d_fpn_1x.py @@ -0,0 +1,165 @@ +# model settings +model = dict( + type='FasterRCNN', + pretrained='open-mmlab://resnext101_32x4d', + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100) + # soft-nms is also supported for rcnn testing + # e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05) +) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=False, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, 
norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/faster_rcnn_r50_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/faster_rcnn_x101_64x4d_fpn_1x.py b/configs/faster_rcnn_x101_64x4d_fpn_1x.py new file mode 100644 index 0000000..647d07c --- /dev/null +++ b/configs/faster_rcnn_x101_64x4d_fpn_1x.py @@ -0,0 +1,165 @@ +# model settings +model = dict( + type='FasterRCNN', + pretrained='open-mmlab://resnext101_64x4d', + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100) + # soft-nms is also supported for rcnn testing + # e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05) +) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=False, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + 
img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/faster_rcnn_r50_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/fcos/README.md b/configs/fcos/README.md new file mode 100644 index 0000000..3e31b22 --- /dev/null +++ b/configs/fcos/README.md @@ -0,0 +1,25 @@ +# FCOS: Fully Convolutional One-Stage Object Detection + +## Introduction + +``` +@article{tian2019fcos, + title={FCOS: Fully Convolutional One-Stage Object Detection}, + author={Tian, Zhi and Shen, Chunhua and Chen, Hao and He, Tong}, + journal={arXiv preprint arXiv:1904.01355}, + year={2019} +} +``` + +## Results and Models + +| Backbone | Style | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Download | +|:---------:|:-------:|:-------:|:--------:|:-------------------:|:--------------:|:------:|:--------:| +| R-50-FPN | caffe | 1x | 6.9 | 0.396 | 13.6 | 36.7 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/fcos/fcos_r50_fpn_1x-9f253a93.pth) | +| R-50-FPN | caffe | 2x | - | - | - | 38.7 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/fcos/fcos_r50_fpn_2x-f7329d80.pth) | +| R-101-FPN | caffe | 1x | 10.4 | 0.558 | 11.6 | 39.1 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/fcos/fcos_r101_fpn_1x-e4889733.pth) | +| R-101-FPN | caffe | 2x | - | - | - | 40.8 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/fcos/fcos_r101_fpn_2x-42e6f62d.pth) | +| X-101-64x4d-FPN | caffe |2x | 9.7 | 0.892 | 7.0 | 42.8 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/fcos/fcos_x101_64x4d_fpn_2x-a36c0872.pth) | + +**Notes:** +- To be consistent with the author's implementation, we use 4 GPUs with 4 images/GPU for R-50 and R-101 models, and 8 GPUs with 2 image/GPU for X-101 models. 
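+- Either setting keeps the effective batch size at 16 images per iteration, so the optimizer settings stay the same across backbones. A minimal sketch of the relevant fields (copied from `fcos_r50_caffe_fpn_gn_1x_4gpu.py` in this patch, shown here only for illustration):
+
+```python
+# R-50 / R-101 setting: 4 GPUs x 4 imgs/GPU = 16 images per iteration.
+data = dict(imgs_per_gpu=4, workers_per_gpu=4)
+device_ids = range(4)
+# The X-101 config uses 8 GPUs x 2 imgs/GPU instead, which gives the same
+# effective batch size, so the base lr of 0.01 is unchanged in all FCOS configs here.
+```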
diff --git a/configs/fcos/fcos_mstrain_640_800_r101_caffe_fpn_gn_2x_4gpu.py b/configs/fcos/fcos_mstrain_640_800_r101_caffe_fpn_gn_2x_4gpu.py new file mode 100644 index 0000000..41297fc --- /dev/null +++ b/configs/fcos/fcos_mstrain_640_800_r101_caffe_fpn_gn_2x_4gpu.py @@ -0,0 +1,124 @@ +# model settings +model = dict( + type='FCOS', + pretrained='open-mmlab://resnet101_caffe', + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + style='caffe'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs=True, + extra_convs_on_inputs=False, # use P5 + num_outs=5, + relu_before_extra_convs=True), + bbox_head=dict( + type='FCOSHead', + num_classes=81, + in_channels=256, + stacked_convs=4, + feat_channels=256, + strides=[8, 16, 32, 64, 128])) +# training and testing settings +train_cfg = dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1), + smoothl1_beta=0.11, + gamma=2.0, + alpha=0.25, + allowed_border=-1, + pos_weight=-1, + debug=False) +test_cfg = dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False) +data = dict( + imgs_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=[(1333, 640), (1333, 800)], + multiscale_mode='value', + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=False, + with_crowd=False, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=False, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict( + type='SGD', + lr=0.01, + momentum=0.9, + weight_decay=0.0001, + paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.)) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='step', + warmup='constant', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[16, 22]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 24 +device_ids = range(4) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/fcos_mstrain_640_800_r101_caffe_fpn_gn_2x_4gpu' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/fcos/fcos_mstrain_640_800_x101_64x4d_fpn_gn_2x.py b/configs/fcos/fcos_mstrain_640_800_x101_64x4d_fpn_gn_2x.py new file mode 100644 index 0000000..4f9352c --- /dev/null +++ b/configs/fcos/fcos_mstrain_640_800_x101_64x4d_fpn_gn_2x.py @@ -0,0 +1,125 @@ +# model settings +model = dict( + type='FCOS', + 
pretrained='open-mmlab://resnext101_64x4d', + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs=True, + extra_convs_on_inputs=False, # use P5 + num_outs=5, + relu_before_extra_convs=True), + bbox_head=dict( + type='FCOSHead', + num_classes=81, + in_channels=256, + stacked_convs=4, + feat_channels=256, + strides=[8, 16, 32, 64, 128])) +# training and testing settings +train_cfg = dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1), + smoothl1_beta=0.11, + gamma=2.0, + alpha=0.25, + allowed_border=-1, + pos_weight=-1, + debug=False) +test_cfg = dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=[(1333, 640), (1333, 800)], + multiscale_mode='value', + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=False, + with_crowd=False, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=False, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict( + type='SGD', + lr=0.01, + momentum=0.9, + weight_decay=0.0001, + paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.)) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='step', + warmup='constant', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[16, 22]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 24 +device_ids = range(8) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/fcos_mstrain_640_800_x101_64x4d_fpn_gn_2x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/fcos/fcos_r50_caffe_fpn_gn_1x_4gpu.py b/configs/fcos/fcos_r50_caffe_fpn_gn_1x_4gpu.py new file mode 100644 index 0000000..dd63ccf --- /dev/null +++ b/configs/fcos/fcos_r50_caffe_fpn_gn_1x_4gpu.py @@ -0,0 +1,123 @@ +# model settings +model = dict( + type='FCOS', + pretrained='open-mmlab://resnet50_caffe', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + style='caffe'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs=True, + extra_convs_on_inputs=False, # use P5 + 
num_outs=5, + relu_before_extra_convs=True), + bbox_head=dict( + type='FCOSHead', + num_classes=81, + in_channels=256, + stacked_convs=4, + feat_channels=256, + strides=[8, 16, 32, 64, 128])) +# training and testing settings +train_cfg = dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1), + smoothl1_beta=0.11, + gamma=2.0, + alpha=0.25, + allowed_border=-1, + pos_weight=-1, + debug=False) +test_cfg = dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False) +data = dict( + imgs_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=False, + with_crowd=False, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=False, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict( + type='SGD', + lr=0.01, + momentum=0.9, + weight_decay=0.0001, + paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.)) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='step', + warmup='constant', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +device_ids = range(4) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/fcos_r50_caffe_fpn_gn_1x_4gpu' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/gn+ws/README.md b/configs/gn+ws/README.md new file mode 100644 index 0000000..511f22c --- /dev/null +++ b/configs/gn+ws/README.md @@ -0,0 +1,54 @@ +# Weight Standardization + +## Introduction + +``` +@article{weightstandardization, + author = {Siyuan Qiao and Huiyu Wang and Chenxi Liu and Wei Shen and Alan Yuille}, + title = {Weight Standardization}, + journal = {arXiv preprint arXiv:1903.10520}, + year = {2019}, +} +``` + +## Results and Models + +Faster R-CNN + +| Backbone | Style | Normalization | Lr schd | box AP | mask AP | Download | +|:---------:|:-------:|:-------------:|:-------:|:------:|:-------:|:--------:| +| R-50-FPN | pytorch | GN | 1x | 37.8 | - | - | +| R-50-FPN | pytorch | GN+WS | 1x | 38.9 | - | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/ws/faster_rcnn_r50_fpn_gn_ws_1x_20190418-935d00b6.pth) | +| R-101-FPN | pytorch | GN | 1x | 39.8 | - | - | +| R-101-FPN | pytorch | GN+WS | 1x | 41.4 | - | 
[model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/ws/faster_rcnn_r101_fpn_gn_ws_1x_20190419-728705ec.pth) | +| X-50-32x4d-FPN | pytorch | GN | 1x | 36.5 | - | - | +| X-50-32x4d-FPN | pytorch | GN+WS | 1x | 39.9 | - | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/ws/faster_rcnn_x50_32x4d_fpn_gn_ws_1x_20190419-4e61072b.pth) | +| X-101-32x4d-FPN | pytorch | GN | 1x | 33.2 | - | - | +| X-101-32x4d-FPN | pytorch | GN+WS | 1x | 41.8 | - | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/ws/faster_rcnn_x101_32x4d_fpn_gn_ws_1x_20190419-c78e5583.pth) | + +Mask R-CNN + +| Backbone | Style | Normalization | Lr schd | box AP | mask AP | Download | +|:---------:|:-------:|:-------------:|:-------:|:------:|:-------:|:--------:| +| R-50-FPN | pytorch | GN | 2x | 39.9 | 36.0 | - | +| R-50-FPN | pytorch | GN+WS | 2x | 40.3 | 36.2 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/ws/mask_rcnn_r50_fpn_gn_ws_2x_20190419-9ec97bbb.pth) | +| R-101-FPN | pytorch | GN | 2x | 41.6 | 37.3 | - | +| R-101-FPN | pytorch | GN+WS | 2x | 42.0 | 37.3 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/ws/mask_rcnn_r101_fpn_gn_ws_2x_20190419-bc7399a6.pth) | +| X-50-32x4d-FPN | pytorch | GN | 2x | 39.2 | 35.5 | - | +| X-50-32x4d-FPN | pytorch | GN+WS | 2x | 40.7 | 36.7 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/ws/mask_rcnn_x50_32x4d_fpn_gn_ws_2x_20190419-2110205e.pth) | +| X-101-32x4d-FPN | pytorch | GN | 2x | 36.4 | 33.1 | - | +| X-101-32x4d-FPN | pytorch | GN+WS | 2x | 42.1 | 37.7 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/ws/mask_rcnn_x101_32x4d_fpn_gn_ws_2x_20190419-7777b15f.pth) | +| R-50-FPN | pytorch | GN | 20-23-24e | 40.6 | 36.6 | - | +| R-50-FPN | pytorch | GN+WS | 20-23-24e | 41.1 | 37.0 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/ws/mask_rcnn_r50_fpn_gn_ws_20_23_24e_20190425-1d9e499e.pth) | +| R-101-FPN | pytorch | GN | 20-23-24e | 42.3 | 38.1 | - | +| R-101-FPN | pytorch | GN+WS | 20-23-24e | 43.0 | 38.4 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/ws/mask_rcnn_r101_fpn_gn_ws_20_23_24e_20190425-66cb3792.pth) | +| X-50-32x4d-FPN | pytorch | GN | 20-23-24e | 39.6 | 35.9 | - | +| X-50-32x4d-FPN | pytorch | GN+WS | 20-23-24e | 41.9 | 37.7 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/ws/mask_rcnn_x50_32x4d_fpn_gn_ws_20_23_24e_20190425-d01e2200.pth) | +| X-101-32x4d-FPN | pytorch | GN | 20-23-24e | 36.6 | 33.4 | - | +| X-101-32x4d-FPN | pytorch | GN+WS | 20-23-24e | 43.4 | 38.7 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/ws/mask_rcnn_x101_32x4d_fpn_gn_ws_20_23_24e_20190425-1ff3e5b2.pth) | + +Note: + +- GN+WS requires about 5% more memory than GN, and it is only 5% slower than GN. +- In the paper, a 20-23-24e lr schedule is used instead of 2x. +- The X-50-GN and X-101-GN pretrained models are also shared by the authors. 
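+- For reference, a sketch of what the 20-23-24e schedule looks like in config form, assuming it follows the same step-decay pattern as the other configs in this patch (LR dropped after epochs 20 and 23, 24 epochs in total):
+
+```python
+# Illustrative lr schedule fragment for the *_20_23_24e configs (assumed values;
+# warmup settings mirror the other schedules in this patch).
+lr_config = dict(
+    policy='step',
+    warmup='linear',
+    warmup_iters=500,
+    warmup_ratio=1.0 / 3,
+    step=[20, 23])
+total_epochs = 24
+```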
\ No newline at end of file diff --git a/configs/gn+ws/faster_rcnn_r50_fpn_gn_ws_1x.py b/configs/gn+ws/faster_rcnn_r50_fpn_gn_ws_1x.py new file mode 100644 index 0000000..396993f --- /dev/null +++ b/configs/gn+ws/faster_rcnn_r50_fpn_gn_ws_1x.py @@ -0,0 +1,170 @@ +# model settings +conv_cfg = dict(type='ConvWS') +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + type='FasterRCNN', + pretrained='open-mmlab://jhu/resnet50_gn_ws', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch', + conv_cfg=conv_cfg, + norm_cfg=norm_cfg), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='ConvFCBBoxHead', + num_shared_convs=4, + num_shared_fcs=1, + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=False, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + 
size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/faster_rcnn_r50_fpn_gn_ws_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws_20_23_24e.py b/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws_20_23_24e.py new file mode 100644 index 0000000..5a79bfd --- /dev/null +++ b/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws_20_23_24e.py @@ -0,0 +1,187 @@ +# model settings +conv_cfg = dict(type='ConvWS') +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + type='MaskRCNN', + pretrained='open-mmlab://jhu/resnet50_gn_ws', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch', + conv_cfg=conv_cfg, + norm_cfg=norm_cfg), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='ConvFCBBoxHead', + num_shared_convs=4, + num_shared_fcs=1, + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=81, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False)) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + 
max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100, + mask_thr_binary=0.5)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=True, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[20, 23]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 24 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/mask_rcnn_r50_fpn_gn_ws_20_23_24e' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws_2x.py b/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws_2x.py new file mode 100644 index 0000000..7294aee --- /dev/null +++ b/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws_2x.py @@ -0,0 +1,187 @@ +# model settings +conv_cfg = dict(type='ConvWS') +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + type='MaskRCNN', + pretrained='open-mmlab://jhu/resnet50_gn_ws', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch', + conv_cfg=conv_cfg, + norm_cfg=norm_cfg), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='ConvFCBBoxHead', + num_shared_convs=4, + num_shared_fcs=1, + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + 
out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=81, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False)) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100, + mask_thr_binary=0.5)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=True, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[16, 22]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 24 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/mask_rcnn_r50_fpn_gn_ws_2x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws_2x.py b/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws_2x.py new file mode 100644 index 0000000..4ed83b2 --- /dev/null +++ b/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws_2x.py @@ -0,0 +1,189 @@ +# model settings +conv_cfg = dict(type='ConvWS') +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + type='MaskRCNN', + pretrained='open-mmlab://jhu/resnext101_32x4d_gn_ws', + backbone=dict( + type='ResNeXt', + depth=101, + 
groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch', + conv_cfg=conv_cfg, + norm_cfg=norm_cfg), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='ConvFCBBoxHead', + num_shared_convs=4, + num_shared_fcs=1, + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=81, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False)) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100, + mask_thr_binary=0.5)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=True, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = 
dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[16, 22]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 24 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/mask_rcnn_x101_32x4d_fpn_gn_ws_2x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/gn/README.md b/configs/gn/README.md new file mode 100644 index 0000000..6bced47 --- /dev/null +++ b/configs/gn/README.md @@ -0,0 +1,28 @@ +# Group Normalization + +## Introduction + +``` +@inproceedings{wu2018group, + title={Group Normalization}, + author={Wu, Yuxin and He, Kaiming}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2018} +} +``` + +## Results and Models + +| Backbone | model | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | mask AP | Download | +|:-------------:|:----------:|:-------:|:--------:|:-------------------:|:--------------:|:------:|:-------:|:--------:| +| R-50-FPN (d) | Mask R-CNN | 2x | 7.2 | 0.806 | 5.4 | 39.8 | 36.1 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/gn/mask_rcnn_r50_fpn_gn_2x_20180113-86832cf2.pth) | +| R-50-FPN (d) | Mask R-CNN | 3x | 7.2 | 0.806 | 5.4 | 40.1 | 36.4 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/gn/mask_rcnn_r50_fpn_gn_3x_20180113-8e82f48d.pth) | +| R-101-FPN (d) | Mask R-CNN | 2x | 9.9 | 0.970 | 4.8 | 41.5 | 37.0 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/gn/mask_rcnn_r101_fpn_gn_2x_20180113-9598649c.pth) | +| R-101-FPN (d) | Mask R-CNN | 3x | 9.9 | 0.970 | 4.8 | 41.6 | 37.3 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/gn/mask_rcnn_r101_fpn_gn_3x_20180113-a14ffb96.pth) | +| R-50-FPN (c) | Mask R-CNN | 2x | 7.2 | 0.806 | 5.4 | 39.7 | 35.9 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/gn/mask_rcnn_r50_fpn_gn_contrib_2x_20180113-ec93305c.pth) | +| R-50-FPN (c) | Mask R-CNN | 3x | 7.2 | 0.806 | 5.4 | 40.0 | 36.2 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/gn/mask_rcnn_r50_fpn_gn_contrib_3x_20180113-9d230cab.pth) | + +**Notes:** +- (d) means pretrained model converted from Detectron, and (c) means the contributed model pretrained by [@thangvubk](https://github.com/thangvubk). +- The `3x` schedule is epoch [28, 34, 36]. 
+- **Memory, Train/Inf time is outdated.** \ No newline at end of file diff --git a/configs/gn/mask_rcnn_r101_fpn_gn_2x.py b/configs/gn/mask_rcnn_r101_fpn_gn_2x.py new file mode 100644 index 0000000..3f61dc4 --- /dev/null +++ b/configs/gn/mask_rcnn_r101_fpn_gn_2x.py @@ -0,0 +1,184 @@ +# model settings +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) + +model = dict( + type='MaskRCNN', + pretrained='open-mmlab://detectron/resnet101_gn', + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch', + norm_cfg=norm_cfg), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5, + norm_cfg=norm_cfg), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='ConvFCBBoxHead', + num_shared_convs=4, + num_shared_fcs=1, + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False, + norm_cfg=norm_cfg), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=81, + norm_cfg=norm_cfg)) + +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False)) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100, + mask_thr_binary=0.5)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=True, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + 
size_divisor=32, + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[16, 22]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 24 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/mask_rcnn_r101_fpn_gn_2x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/gn/mask_rcnn_r50_fpn_gn_2x.py b/configs/gn/mask_rcnn_r50_fpn_gn_2x.py new file mode 100644 index 0000000..165c4aa --- /dev/null +++ b/configs/gn/mask_rcnn_r50_fpn_gn_2x.py @@ -0,0 +1,184 @@ +# model settings +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) + +model = dict( + type='MaskRCNN', + pretrained='open-mmlab://detectron/resnet50_gn', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch', + norm_cfg=norm_cfg), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5, + norm_cfg=norm_cfg), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='ConvFCBBoxHead', + num_shared_convs=4, + num_shared_fcs=1, + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False, + norm_cfg=norm_cfg), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=81, + norm_cfg=norm_cfg)) + +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + 
pos_weight=-1, + debug=False)) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100, + mask_thr_binary=0.5)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=True, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[16, 22]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 24 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/mask_rcnn_r50_fpn_gn_2x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/gn/mask_rcnn_r50_fpn_gn_contrib_2x.py b/configs/gn/mask_rcnn_r50_fpn_gn_contrib_2x.py new file mode 100644 index 0000000..00760fb --- /dev/null +++ b/configs/gn/mask_rcnn_r50_fpn_gn_contrib_2x.py @@ -0,0 +1,184 @@ +# model settings +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) + +model = dict( + type='MaskRCNN', + pretrained='open-mmlab://contrib/resnet50_gn', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch', + norm_cfg=norm_cfg), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5, + norm_cfg=norm_cfg), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='ConvFCBBoxHead', + num_shared_convs=4, + num_shared_fcs=1, + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False, + norm_cfg=norm_cfg), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, 
sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=81, + norm_cfg=norm_cfg)) + +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False)) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100, + mask_thr_binary=0.5)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=True, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[16, 22]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 24 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/mask_rcnn_r50_fpn_gn_contrib_2x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/grid_rcnn_r50_fpn_2x.py b/configs/grid_rcnn_r50_fpn_2x.py new file mode 100644 index 0000000..a9da88f --- /dev/null +++ b/configs/grid_rcnn_r50_fpn_2x.py @@ -0,0 +1,177 @@ +# model settings +model = dict( + type='GridRCNN', + pretrained='modelzoo://resnet50', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_eval=True, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + 
out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='SharedFCBBoxHeadGrid', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False), + grid_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + grid_head=dict( + type='GridHead', + num_convs=8, + in_channels=256, + conv_out_channels=576, + num_grids=9)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=56, + num_grids=9, + pos_weight=-1, + max_num_grid=192, + debug=False)) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.03, + nms=dict(type='nms', iou_thr=0.3), + max_per_img=100)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=True, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=3665, + warmup_ratio=1.0 / 80, + step=[17, 23]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + 
dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 25 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './grid4' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/htc/README.md b/configs/htc/README.md new file mode 100644 index 0000000..7a819ed --- /dev/null +++ b/configs/htc/README.md @@ -0,0 +1,55 @@ +# Hybrid Task Cascade for Instance Segmentation + +## Introduction + +We provide config files to reproduce the results in the CVPR 2019 paper for [Hybrid Task Cascade](https://arxiv.org/abs/1901.07518). + +``` +@inproceedings{chen2019hybrid, + title={Hybrid task cascade for instance segmentation}, + author={Chen, Kai and Pang, Jiangmiao and Wang, Jiaqi and Xiong, Yu and Li, Xiaoxiao and Sun, Shuyang and Feng, Wansen and Liu, Ziwei and Shi, Jianping and Ouyang, Wanli and Loy, Chen Change and Lin, Dahua}, + booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, + year={2019} +} +``` + +## Dataset + +HTC requires the COCO and COCO-stuff datasets for training. You need to download COCO-stuff and extract it under the COCO dataset path. +The directory structure should look like this: + +``` +mmdetection +├── mmdet +├── tools +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ ├── train2017 +│ │ ├── val2017 +│ │ ├── test2017 +│ │ ├── stuffthingmaps +``` + +## Results and Models + +The results on COCO 2017val are shown in the table below. (Results on test-dev are usually slightly higher than on val.) + +| Backbone | Style | Lr schd | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | mask AP | Download | +|:---------:|:-------:|:-------:|:--------:|:-------------------:|:--------------:|:------:|:-------:|:--------:| +| R-50-FPN | pytorch | 1x | 7.4 | 0.936 | 4.1 | 42.1 | 37.3 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/htc/htc_r50_fpn_1x_20190408-878c1712.pth) | +| R-50-FPN | pytorch | 20e | - | - | - | 43.2 | 38.1 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/htc/htc_r50_fpn_20e_20190408-c03b7015.pth) | +| R-101-FPN | pytorch | 20e | 9.3 | 1.051 | 4.0 | 44.9 | 39.4 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/htc/htc_r101_fpn_20e_20190408-a2e586db.pth) | +| X-101-32x4d-FPN | pytorch | 20e | 5.8 | 0.769 | 3.8 | 46.1 | 40.3 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/htc/htc_x101_32x4d_fpn_20e_20190408-9eae4d0b.pth) | +| X-101-64x4d-FPN | pytorch | 20e | 7.5 | 1.120 | 3.5 | 46.9 | 40.8 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/htc/htc_x101_64x4d_fpn_20e_20190408-497f2561.pth) | + +- In the HTC paper and COCO 2018 Challenge, `score_thr` is set to 0.001 for both baselines and HTC. +- We use 8 GPUs with 2 images/GPU for R-50 and R-101 models, and 16 GPUs with 1 image/GPU for X-101 models. +If you would like to train X-101 HTC with 8 GPUs, you need to change the lr from 0.02 to 0.01. + +We also provide a powerful HTC model with DCN and multi-scale training. No test-time augmentation is used.
+ +| Backbone | Style | DCN | training scales | Lr schd | box AP | mask AP | Download | +|:----------------:|:-------:|:-----:|:---------------:|:-------:|:------:|:-------:|:--------:| +| X-101-64x4d-FPN | pytorch | c3-c5 | 400~1400 | 20e | 50.7 | 43.9 | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/htc/htc_dconv_c3-c5_mstrain_400_1400_x101_64x4d_fpn_20e_20190408-0e50669c.pth) | \ No newline at end of file diff --git a/configs/htc/htc_dconv_c3-c5_mstrain_400_1400_x101_64x4d_fpn_20e.py b/configs/htc/htc_dconv_c3-c5_mstrain_400_1400_x101_64x4d_fpn_20e.py new file mode 100644 index 0000000..dbebfe3 --- /dev/null +++ b/configs/htc/htc_dconv_c3-c5_mstrain_400_1400_x101_64x4d_fpn_20e.py @@ -0,0 +1,263 @@ +# model settings +model = dict( + type='HybridTaskCascade', + num_stages=3, + pretrained='open-mmlab://resnext101_64x4d', + interleaved=True, + mask_info_flow=True, + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch', + dcn=dict( + modulated=False, + groups=64, + deformable_groups=1, + fallback_on_stride=False), + stage_with_dcn=(False, True, True, True)), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067], + reg_class_agnostic=True) + ], + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='HTCMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=81), + semantic_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=256, + featmap_strides=[8]), + semantic_head=dict( + type='FusedSemanticHead', + num_ins=5, + fusion_level=1, + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=183, + ignore_label=255, + loss_weight=0.2)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + 
nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False) + ], + stage_loss_weights=[1, 0.5, 0.25]) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.001, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100, + mask_thr_binary=0.5), + keep_all_stages=False) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=1, + workers_per_gpu=1, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=[(1600, 400), (1600, 1400)], + multiscale_mode='range', + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + seg_prefix=data_root + 'stuffthingmaps/train2017/', + seg_scale_factor=1 / 8, + with_mask=True, + with_crowd=True, + with_label=True, + with_semantic_seg=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[16, 19]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 20 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/htc_dconv_c3-c5_mstrain_400_1400_x101_64x4d_fpn_20e' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/htc/htc_r101_fpn_20e.py b/configs/htc/htc_r101_fpn_20e.py new file mode 100644 index 0000000..8736c81 --- /dev/null +++ b/configs/htc/htc_r101_fpn_20e.py @@ -0,0 +1,254 @@ +# model settings +model = dict( + type='HybridTaskCascade', + num_stages=3, + pretrained='modelzoo://resnet101', + interleaved=True, + 
mask_info_flow=True, + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067], + reg_class_agnostic=True) + ], + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='HTCMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=81), + semantic_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=256, + featmap_strides=[8]), + semantic_head=dict( + type='FusedSemanticHead', + num_ins=5, + fusion_level=1, + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=183, + ignore_label=255, + loss_weight=0.2)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False) + ], + stage_loss_weights=[1, 0.5, 0.25]) +test_cfg = dict( + rpn=dict( + 
nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.001, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100, + mask_thr_binary=0.5), + keep_all_stages=False) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + seg_prefix=data_root + 'stuffthingmaps/train2017/', + seg_scale_factor=1 / 8, + with_mask=True, + with_crowd=True, + with_label=True, + with_semantic_seg=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[16, 19]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 20 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/htc_r101_fpn_20e' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/htc/htc_r50_fpn_1x.py b/configs/htc/htc_r50_fpn_1x.py new file mode 100644 index 0000000..2291470 --- /dev/null +++ b/configs/htc/htc_r50_fpn_1x.py @@ -0,0 +1,254 @@ +# model settings +model = dict( + type='HybridTaskCascade', + num_stages=3, + pretrained='modelzoo://resnet50', + interleaved=True, + mask_info_flow=True, + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1], + 
reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067], + reg_class_agnostic=True) + ], + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='HTCMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=81), + semantic_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=256, + featmap_strides=[8]), + semantic_head=dict( + type='FusedSemanticHead', + num_ins=5, + fusion_level=1, + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=183, + ignore_label=255, + loss_weight=0.2)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False) + ], + stage_loss_weights=[1, 0.5, 0.25]) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.001, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100, + mask_thr_binary=0.5), + keep_all_stages=False) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + seg_prefix=data_root + 'stuffthingmaps/train2017/', + seg_scale_factor=1 / 8, + with_mask=True, + with_crowd=True, + with_label=True, + with_semantic_seg=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True), + 
test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/htc_r50_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/htc/htc_r50_fpn_20e.py b/configs/htc/htc_r50_fpn_20e.py new file mode 100644 index 0000000..e37cae2 --- /dev/null +++ b/configs/htc/htc_r50_fpn_20e.py @@ -0,0 +1,254 @@ +# model settings +model = dict( + type='HybridTaskCascade', + num_stages=3, + pretrained='modelzoo://resnet50', + interleaved=True, + mask_info_flow=True, + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067], + reg_class_agnostic=True) + ], + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='HTCMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=81), + semantic_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=256, + featmap_strides=[8]), + semantic_head=dict( + type='FusedSemanticHead', + num_ins=5, + fusion_level=1, + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=183, + ignore_label=255, + loss_weight=0.2)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + 
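+            # The RPN samples 256 anchors per image, at most half of them positives
+            # (pos_fraction=0.5), with no cap on the negative:positive ratio (neg_pos_ub=-1).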
num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False) + ], + stage_loss_weights=[1, 0.5, 0.25]) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.001, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100, + mask_thr_binary=0.5), + keep_all_stages=False) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + seg_prefix=data_root + 'stuffthingmaps/train2017/', + seg_scale_factor=1 / 8, + with_mask=True, + with_crowd=True, + with_label=True, + with_semantic_seg=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[16, 19]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 20 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/htc_r50_fpn_20e' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/htc/htc_without_semantic_r50_fpn_1x.py b/configs/htc/htc_without_semantic_r50_fpn_1x.py new file mode 100644 index 0000000..83d4537 --- /dev/null +++ 
b/configs/htc/htc_without_semantic_r50_fpn_1x.py @@ -0,0 +1,236 @@ +# model settings +model = dict( + type='HybridTaskCascade', + num_stages=3, + pretrained='modelzoo://resnet50', + interleaved=True, + mask_info_flow=True, + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067], + reg_class_agnostic=True) + ], + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='HTCMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=81)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False) + ], + stage_loss_weights=[1, 0.5, 0.25]) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.001, + nms=dict(type='nms', iou_thr=0.5), + 
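+        # The HTC configs keep a very low score_thr (0.001) and let NMS plus
+        # max_per_img prune detections; the plain Mask R-CNN configs further
+        # down use score_thr=0.05 instead.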
max_per_img=100, + mask_thr_binary=0.5), + keep_all_stages=False) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=True, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/htc_without_semantic_r50_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/htc/htc_x101_32x4d_fpn_20e_16gpu.py b/configs/htc/htc_x101_32x4d_fpn_20e_16gpu.py new file mode 100644 index 0000000..7a1f234 --- /dev/null +++ b/configs/htc/htc_x101_32x4d_fpn_20e_16gpu.py @@ -0,0 +1,256 @@ +# model settings +model = dict( + type='HybridTaskCascade', + num_stages=3, + pretrained='open-mmlab://resnext101_32x4d', + interleaved=True, + mask_info_flow=True, + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + 
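+            # num_classes=81 = 80 COCO object categories plus one background class.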
target_stds=[0.033, 0.033, 0.067, 0.067], + reg_class_agnostic=True) + ], + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='HTCMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=81), + semantic_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=256, + featmap_strides=[8]), + semantic_head=dict( + type='FusedSemanticHead', + num_ins=5, + fusion_level=1, + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=183, + ignore_label=255, + loss_weight=0.2)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False) + ], + stage_loss_weights=[1, 0.5, 0.25]) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.001, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100, + mask_thr_binary=0.5), + keep_all_stages=False) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=1, + workers_per_gpu=1, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + seg_prefix=data_root + 'stuffthingmaps/train2017/', + seg_scale_factor=1 / 8, + with_mask=True, + with_crowd=True, + with_label=True, + with_semantic_seg=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + 
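+        # size_divisor=32 pads each image so height and width are divisible by 32,
+        # keeping the deepest backbone feature map aligned with the input.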
size_divisor=32, + flip_ratio=0, + with_mask=True, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[16, 19]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 20 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/htc_x101_32x4d_fpn_20e' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/htc/htc_x101_64x4d_fpn_20e_16gpu.py b/configs/htc/htc_x101_64x4d_fpn_20e_16gpu.py new file mode 100644 index 0000000..025b36d --- /dev/null +++ b/configs/htc/htc_x101_64x4d_fpn_20e_16gpu.py @@ -0,0 +1,256 @@ +# model settings +model = dict( + type='HybridTaskCascade', + num_stages=3, + pretrained='open-mmlab://resnext101_64x4d', + interleaved=True, + mask_info_flow=True, + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1], + reg_class_agnostic=True), + dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067], + reg_class_agnostic=True) + ], + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='HTCMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=81), + semantic_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=256, + featmap_strides=[8]), + semantic_head=dict( + type='FusedSemanticHead', + num_ins=5, + fusion_level=1, + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=183, + ignore_label=255, + loss_weight=0.2)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + 
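+        # allowed_border=0 excludes anchors that extend outside the image when
+        # assigning RPN targets; pos_weight=-1 leaves positives at the default loss weight.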
pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False) + ], + stage_loss_weights=[1, 0.5, 0.25]) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.001, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100, + mask_thr_binary=0.5), + keep_all_stages=False) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=1, + workers_per_gpu=1, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + seg_prefix=data_root + 'stuffthingmaps/train2017/', + seg_scale_factor=1 / 8, + with_mask=True, + with_crowd=True, + with_label=True, + with_semantic_seg=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[16, 19]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 20 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/htc_x101_64x4d_fpn_20e' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/mask_rcnn_r101_fpn_1x.py b/configs/mask_rcnn_r101_fpn_1x.py new file mode 100644 index 0000000..1059120 --- /dev/null +++ b/configs/mask_rcnn_r101_fpn_1x.py @@ -0,0 +1,175 @@ +# model settings +model = dict( + type='MaskRCNN', + pretrained='modelzoo://resnet101', + 
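+    # 'modelzoo://' names are resolved to torchvision-pretrained checkpoints;
+    # the ResNeXt and caffe-style configs use 'open-mmlab://' hosted weights instead.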
backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=81)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False)) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100, + mask_thr_binary=0.5)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=True, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', 
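+    # '1x' schedule: 12 epochs with the lr stepped down at epochs 8 and 11,
+    # after a 500-iteration linear warmup.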
+ warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/mask_rcnn_r101_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/mask_rcnn_r50_caffe_c4_1x.py b/configs/mask_rcnn_r50_caffe_c4_1x.py new file mode 100644 index 0000000..4173b29 --- /dev/null +++ b/configs/mask_rcnn_r50_caffe_c4_1x.py @@ -0,0 +1,179 @@ +# model settings +norm_cfg = dict(type='BN', requires_grad=False) +model = dict( + type='MaskRCNN', + pretrained='open-mmlab://resnet50_caffe', + backbone=dict( + type='ResNet', + depth=50, + num_stages=3, + strides=(1, 2, 2), + dilations=(1, 1, 1), + out_indices=(2, ), + frozen_stages=1, + norm_cfg=norm_cfg, + norm_eval=True, + style='caffe'), + shared_head=dict( + type='ResLayer', + depth=50, + stage=3, + stride=2, + dilation=1, + style='caffe', + norm_cfg=norm_cfg, + norm_eval=True), + rpn_head=dict( + type='RPNHead', + in_channels=1024, + feat_channels=1024, + anchor_scales=[2, 4, 8, 16, 32], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[16], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=1024, + featmap_strides=[16]), + bbox_head=dict( + type='BBoxHead', + with_avg_pool=True, + roi_feat_size=7, + in_channels=2048, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False), + mask_roi_extractor=None, + mask_head=dict( + type='FCNMaskHead', + num_convs=0, + in_channels=2048, + conv_out_channels=256, + num_classes=81)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=12000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=14, + pos_weight=-1, + debug=False)) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=6000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100, + mask_thr_binary=0.5)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False) +data = dict( + imgs_per_gpu=1, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=True, + with_crowd=True, + with_label=True), + val=dict( + 
type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/mask_rcnn_r50_caffe_c4_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/mask_rcnn_r50_fpn_1x.py b/configs/mask_rcnn_r50_fpn_1x.py new file mode 100644 index 0000000..eb4330b --- /dev/null +++ b/configs/mask_rcnn_r50_fpn_1x.py @@ -0,0 +1,175 @@ +# model settings +model = dict( + type='MaskRCNN', + pretrained='modelzoo://resnet50', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=81)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False)) +test_cfg = dict( + rpn=dict( + 
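+        # At test time the RPN keeps 1000 proposals (vs. 2000 during training).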
nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100, + mask_thr_binary=0.5)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=True, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/mask_rcnn_r50_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/mask_rcnn_x101_32x4d_fpn_1x.py b/configs/mask_rcnn_x101_32x4d_fpn_1x.py new file mode 100644 index 0000000..6772f2b --- /dev/null +++ b/configs/mask_rcnn_x101_32x4d_fpn_1x.py @@ -0,0 +1,177 @@ +# model settings +model = dict( + type='MaskRCNN', + pretrained='open-mmlab://resnext101_32x4d', + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=81)) +# model training and testing settings +train_cfg = 
dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False)) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100, + mask_thr_binary=0.5)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=True, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/mask_rcnn_r50_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/mask_rcnn_x101_64x4d_fpn_1x.py b/configs/mask_rcnn_x101_64x4d_fpn_1x.py new file mode 100644 index 0000000..8c61e33 --- /dev/null +++ b/configs/mask_rcnn_x101_64x4d_fpn_1x.py @@ -0,0 +1,177 @@ +# model settings +model = dict( + type='MaskRCNN', + pretrained='open-mmlab://resnext101_64x4d', + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + 
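+        # RPN box deltas are left unnormalized (stds of 1.0); the RoI head below
+        # uses the tighter [0.1, 0.1, 0.2, 0.2].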
target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=81)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False)) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100, + mask_thr_binary=0.5)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=True, + with_crowd=True, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/mask_rcnn_r50_fpn_1x' +load_from = None +resume_from = None 
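+# workflow = [('train', 1)] runs training only; appending ('val', 1) would also
+# execute a validation epoch in each cycle.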
+workflow = [('train', 1)] diff --git a/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py b/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py new file mode 100644 index 0000000..88f0ae0 --- /dev/null +++ b/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py @@ -0,0 +1,164 @@ +# model settings +model = dict( + type='FasterRCNN', + pretrained='modelzoo://resnet50', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='SharedFCBBoxHead', + num_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=21, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100) + # soft-nms is also supported for rcnn testing + # e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05) +) +# dataset settings +dataset_type = 'VOCDataset' +data_root = 'data/VOCdevkit/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='RepeatDataset', # to avoid reloading datasets frequently + times=3, + dataset=dict( + type=dataset_type, + ann_file=[ + data_root + 'VOC2007/ImageSets/Main/trainval.txt', + data_root + 'VOC2012/ImageSets/Main/trainval.txt' + ], + img_prefix=[data_root + 'VOC2007/', data_root + 'VOC2012/'], + img_scale=(1000, 600), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=False, + with_crowd=True, + with_label=True)), + val=dict( + type=dataset_type, + ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt', + img_prefix=data_root + 'VOC2007/', + img_scale=(1000, 600), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=True, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt', + img_prefix=data_root + 'VOC2007/', + img_scale=(1000, 600), + 
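+        # The VOC configs train and test at (1000, 600) rather than COCO's (1333, 800).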
img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[3]) # actual epoch = 3 * 3 = 9 +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 4 # actual epoch = 4 * 3 = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/faster_rcnn_r50_fpn_1x_voc0712' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/pascal_voc/ssd300_voc.py b/configs/pascal_voc/ssd300_voc.py new file mode 100644 index 0000000..551ecda --- /dev/null +++ b/configs/pascal_voc/ssd300_voc.py @@ -0,0 +1,134 @@ +# model settings +input_size = 300 +model = dict( + type='SingleStageDetector', + pretrained='open-mmlab://vgg16_caffe', + backbone=dict( + type='SSDVGG', + input_size=input_size, + depth=16, + with_last_pool=False, + ceil_mode=True, + out_indices=(3, 4), + out_feature_indices=(22, 34), + l2_norm_scale=20), + neck=None, + bbox_head=dict( + type='SSDHead', + input_size=input_size, + in_channels=(512, 1024, 512, 256, 256, 256), + num_classes=21, + anchor_strides=(8, 16, 32, 64, 100, 300), + basesize_ratio_range=(0.2, 0.9), + anchor_ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]), + target_means=(.0, .0, .0, .0), + target_stds=(0.1, 0.1, 0.2, 0.2))) +cudnn_benchmark = True +train_cfg = dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0., + ignore_iof_thr=-1, + gt_max_assign_all=False), + smoothl1_beta=1., + allowed_border=-1, + pos_weight=-1, + neg_pos_ratio=3, + debug=False) +test_cfg = dict( + nms=dict(type='nms', iou_thr=0.45), + min_bbox_size=0, + score_thr=0.02, + max_per_img=200) +# model training and testing settings +# dataset settings +dataset_type = 'VOCDataset' +data_root = 'data/VOCdevkit/' +img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) +data = dict( + imgs_per_gpu=4, + workers_per_gpu=2, + train=dict( + type='RepeatDataset', + times=10, + dataset=dict( + type=dataset_type, + ann_file=[ + data_root + 'VOC2007/ImageSets/Main/trainval.txt', + data_root + 'VOC2012/ImageSets/Main/trainval.txt' + ], + img_prefix=[data_root + 'VOC2007/', data_root + 'VOC2012/'], + img_scale=(300, 300), + img_norm_cfg=img_norm_cfg, + size_divisor=None, + flip_ratio=0.5, + with_mask=False, + with_crowd=False, + with_label=True, + test_mode=False, + extra_aug=dict( + photo_metric_distortion=dict( + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + expand=dict( + mean=img_norm_cfg['mean'], + to_rgb=img_norm_cfg['to_rgb'], + ratio_range=(1, 4)), + random_crop=dict( + min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3)), + resize_keep_ratio=False)), + val=dict( + type=dataset_type, + ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt', + img_prefix=data_root + 'VOC2007/', + img_scale=(300, 300), + img_norm_cfg=img_norm_cfg, + size_divisor=None, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True, + resize_keep_ratio=False), + test=dict( + type=dataset_type, + ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt', + img_prefix=data_root + 'VOC2007/', + img_scale=(300, 300), + 
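+        # SSD300 resizes every image to a fixed 300x300 square (resize_keep_ratio=False)
+        # instead of the ratio-preserving rescale used by the two-stage configs.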
img_norm_cfg=img_norm_cfg, + size_divisor=None, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True, + resize_keep_ratio=False)) +# optimizer +optimizer = dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4) +optimizer_config = dict() +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[16, 20]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 24 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/ssd300_voc' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/pascal_voc/ssd512_voc.py b/configs/pascal_voc/ssd512_voc.py new file mode 100644 index 0000000..f01404a --- /dev/null +++ b/configs/pascal_voc/ssd512_voc.py @@ -0,0 +1,134 @@ +# model settings +input_size = 512 +model = dict( + type='SingleStageDetector', + pretrained='open-mmlab://vgg16_caffe', + backbone=dict( + type='SSDVGG', + input_size=input_size, + depth=16, + with_last_pool=False, + ceil_mode=True, + out_indices=(3, 4), + out_feature_indices=(22, 34), + l2_norm_scale=20), + neck=None, + bbox_head=dict( + type='SSDHead', + input_size=input_size, + in_channels=(512, 1024, 512, 256, 256, 256, 256), + num_classes=21, + anchor_strides=(8, 16, 32, 64, 128, 256, 512), + basesize_ratio_range=(0.15, 0.9), + anchor_ratios=([2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]), + target_means=(.0, .0, .0, .0), + target_stds=(0.1, 0.1, 0.2, 0.2))) +cudnn_benchmark = True +train_cfg = dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0., + ignore_iof_thr=-1, + gt_max_assign_all=False), + smoothl1_beta=1., + allowed_border=-1, + pos_weight=-1, + neg_pos_ratio=3, + debug=False) +test_cfg = dict( + nms=dict(type='nms', iou_thr=0.45), + min_bbox_size=0, + score_thr=0.02, + max_per_img=200) +# model training and testing settings +# dataset settings +dataset_type = 'VOCDataset' +data_root = 'data/VOCdevkit/' +img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) +data = dict( + imgs_per_gpu=4, + workers_per_gpu=2, + train=dict( + type='RepeatDataset', + times=10, + dataset=dict( + type=dataset_type, + ann_file=[ + data_root + 'VOC2007/ImageSets/Main/trainval.txt', + data_root + 'VOC2012/ImageSets/Main/trainval.txt' + ], + img_prefix=[data_root + 'VOC2007/', data_root + 'VOC2012/'], + img_scale=(512, 512), + img_norm_cfg=img_norm_cfg, + size_divisor=None, + flip_ratio=0.5, + with_mask=False, + with_crowd=False, + with_label=True, + test_mode=False, + extra_aug=dict( + photo_metric_distortion=dict( + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + expand=dict( + mean=img_norm_cfg['mean'], + to_rgb=img_norm_cfg['to_rgb'], + ratio_range=(1, 4)), + random_crop=dict( + min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3)), + resize_keep_ratio=False)), + val=dict( + type=dataset_type, + ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt', + img_prefix=data_root + 'VOC2007/', + img_scale=(512, 512), + img_norm_cfg=img_norm_cfg, + size_divisor=None, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True, + resize_keep_ratio=False), + test=dict( + type=dataset_type, + ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt', + img_prefix=data_root + 'VOC2007/', + img_scale=(512, 512), 
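+        # SSD512 mirrors the 300 config but with a 512x512 input and one extra
+        # prediction layer (seven source feature maps instead of six).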
+ img_norm_cfg=img_norm_cfg, + size_divisor=None, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True, + resize_keep_ratio=False)) +# optimizer +optimizer = dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4) +optimizer_config = dict() +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[16, 20]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 24 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/ssd512_voc' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/retinanet_r101_fpn_1x.py b/configs/retinanet_r101_fpn_1x.py new file mode 100644 index 0000000..e07d98a --- /dev/null +++ b/configs/retinanet_r101_fpn_1x.py @@ -0,0 +1,120 @@ +# model settings +model = dict( + type='RetinaNet', + pretrained='modelzoo://resnet101', + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs=True, + num_outs=5), + bbox_head=dict( + type='RetinaHead', + num_classes=81, + in_channels=256, + stacked_convs=4, + feat_channels=256, + octave_base_scale=4, + scales_per_octave=3, + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[8, 16, 32, 64, 128], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0])) +# training and testing settings +train_cfg = dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1), + smoothl1_beta=0.11, + gamma=2.0, + alpha=0.25, + allowed_border=-1, + pos_weight=-1, + debug=False) +test_cfg = dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=False, + with_crowd=False, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=False, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), 
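+        # Uncomment the TensorboardLoggerHook entry below to also log to TensorBoard.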
+ # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +device_ids = range(8) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/retinanet_r101_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/retinanet_r50_fpn_1x.py b/configs/retinanet_r50_fpn_1x.py new file mode 100644 index 0000000..2840c06 --- /dev/null +++ b/configs/retinanet_r50_fpn_1x.py @@ -0,0 +1,120 @@ +# model settings +model = dict( + type='RetinaNet', + pretrained='modelzoo://resnet50', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs=True, + num_outs=5), + bbox_head=dict( + type='RetinaHead', + num_classes=81, + in_channels=256, + stacked_convs=4, + feat_channels=256, + octave_base_scale=4, + scales_per_octave=3, + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[8, 16, 32, 64, 128], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0])) +# training and testing settings +train_cfg = dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1), + smoothl1_beta=0.11, + gamma=2.0, + alpha=0.25, + allowed_border=-1, + pos_weight=-1, + debug=False) +test_cfg = dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=False, + with_crowd=False, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=False, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +device_ids = range(8) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/retinanet_r50_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/retinanet_x101_32x4d_fpn_1x.py b/configs/retinanet_x101_32x4d_fpn_1x.py new file mode 100644 index 0000000..3f7741b --- /dev/null +++ b/configs/retinanet_x101_32x4d_fpn_1x.py @@ -0,0 
+1,122 @@ +# model settings +model = dict( + type='RetinaNet', + pretrained='open-mmlab://resnext101_32x4d', + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs=True, + num_outs=5), + bbox_head=dict( + type='RetinaHead', + num_classes=81, + in_channels=256, + stacked_convs=4, + feat_channels=256, + octave_base_scale=4, + scales_per_octave=3, + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[8, 16, 32, 64, 128], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0])) +# training and testing settings +train_cfg = dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1), + smoothl1_beta=0.11, + gamma=2.0, + alpha=0.25, + allowed_border=-1, + pos_weight=-1, + debug=False) +test_cfg = dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=False, + with_crowd=False, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=False, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +device_ids = range(8) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/retinanet_x101_32x4d_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/retinanet_x101_64x4d_fpn_1x.py b/configs/retinanet_x101_64x4d_fpn_1x.py new file mode 100644 index 0000000..2ef8b53 --- /dev/null +++ b/configs/retinanet_x101_64x4d_fpn_1x.py @@ -0,0 +1,122 @@ +# model settings +model = dict( + type='RetinaNet', + pretrained='open-mmlab://resnext101_64x4d', + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs=True, + num_outs=5), + bbox_head=dict( +
type='RetinaHead', + num_classes=81, + in_channels=256, + stacked_convs=4, + feat_channels=256, + octave_base_scale=4, + scales_per_octave=3, + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[8, 16, 32, 64, 128], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0])) +# training and testing settings +train_cfg = dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1), + smoothl1_beta=0.11, + gamma=2.0, + alpha=0.25, + allowed_border=-1, + pos_weight=-1, + debug=False) +test_cfg = dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_thr=0.5), + max_per_img=100) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=False, + with_crowd=False, + with_label=True), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=False, + with_label=True), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +device_ids = range(8) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/retinanet_x101_64x4d_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/rpn_r101_fpn_1x.py b/configs/rpn_r101_fpn_1x.py new file mode 100644 index 0000000..450215e --- /dev/null +++ b/configs/rpn_r101_fpn_1x.py @@ -0,0 +1,121 @@ +# model settings +model = dict( + type='RPN', + pretrained='modelzoo://resnet101', + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), +
allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False)) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=False, + with_crowd=False, + with_label=False), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=False, + with_label=False), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +# runner configs +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/rpn_r101_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/rpn_r50_caffe_c4_1x.py b/configs/rpn_r50_caffe_c4_1x.py new file mode 100644 index 0000000..373603f --- /dev/null +++ b/configs/rpn_r50_caffe_c4_1x.py @@ -0,0 +1,121 @@ +# model settings +model = dict( + type='RPN', + pretrained='open-mmlab://resnet50_caffe', + backbone=dict( + type='ResNet', + depth=50, + num_stages=3, + strides=(1, 2, 2), + dilations=(1, 1, 1), + out_indices=(2, ), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='caffe'), + neck=None, + rpn_head=dict( + type='RPNHead', + in_channels=1024, + feat_channels=1024, + anchor_scales=[2, 4, 8, 16, 32], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[16], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False)) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=12000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + 
ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=False, + with_crowd=False, + with_label=False), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=False, + with_label=False), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +# runner configs +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/rpn_r50_caffe_c4_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/rpn_r50_fpn_1x.py b/configs/rpn_r50_fpn_1x.py new file mode 100644 index 0000000..3af2649 --- /dev/null +++ b/configs/rpn_r50_fpn_1x.py @@ -0,0 +1,121 @@ +# model settings +model = dict( + type='RPN', + pretrained='modelzoo://resnet50', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False)) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=False, + with_crowd=False, + with_label=False), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=False, + 
with_label=False), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +# runner configs +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/rpn_r50_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/rpn_x101_32x4d_fpn_1x.py b/configs/rpn_x101_32x4d_fpn_1x.py new file mode 100644 index 0000000..c23d715 --- /dev/null +++ b/configs/rpn_x101_32x4d_fpn_1x.py @@ -0,0 +1,123 @@ +# model settings +model = dict( + type='RPN', + pretrained='open-mmlab://resnext101_32x4d', + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False)) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=False, + with_crowd=False, + with_label=False), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=False, + with_label=False), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +# runner configs +optimizer_config = dict(grad_clip=dict(max_norm=35, 
norm_type=2)) +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/rpn_x101_32x4d_fpn_1x' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/rpn_x101_64x4d_fpn_1x.py b/configs/rpn_x101_64x4d_fpn_1x.py new file mode 100644 index 0000000..c34a146 --- /dev/null +++ b/configs/rpn_x101_64x4d_fpn_1x.py @@ -0,0 +1,123 @@ +# model settings +model = dict( + type='RPN', + pretrained='open-mmlab://resnext101_64x4d', + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_scales=[8], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + use_sigmoid_cls=True)) +# model training and testing settings +train_cfg = dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + smoothl1_beta=1 / 9.0, + debug=False)) +test_cfg = dict( + rpn=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0.5, + with_mask=False, + with_crowd=False, + with_label=False), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_crowd=False, + with_label=False), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(1333, 800), + img_norm_cfg=img_norm_cfg, + size_divisor=32, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +# runner configs +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 12 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/rpn_x101_64x4d_fpn_1x' +load_from
= None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/ssd300_coco.py b/configs/ssd300_coco.py new file mode 100644 index 0000000..e48a6e6 --- /dev/null +++ b/configs/ssd300_coco.py @@ -0,0 +1,131 @@ +# model settings +input_size = 300 +model = dict( + type='SingleStageDetector', + pretrained='open-mmlab://vgg16_caffe', + backbone=dict( + type='SSDVGG', + input_size=input_size, + depth=16, + with_last_pool=False, + ceil_mode=True, + out_indices=(3, 4), + out_feature_indices=(22, 34), + l2_norm_scale=20), + neck=None, + bbox_head=dict( + type='SSDHead', + input_size=input_size, + in_channels=(512, 1024, 512, 256, 256, 256), + num_classes=81, + anchor_strides=(8, 16, 32, 64, 100, 300), + basesize_ratio_range=(0.15, 0.9), + anchor_ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]), + target_means=(.0, .0, .0, .0), + target_stds=(0.1, 0.1, 0.2, 0.2))) +cudnn_benchmark = True +train_cfg = dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0., + ignore_iof_thr=-1, + gt_max_assign_all=False), + smoothl1_beta=1., + allowed_border=-1, + pos_weight=-1, + neg_pos_ratio=3, + debug=False) +test_cfg = dict( + nms=dict(type='nms', iou_thr=0.45), + min_bbox_size=0, + score_thr=0.02, + max_per_img=200) +# model training and testing settings +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) +data = dict( + imgs_per_gpu=8, + workers_per_gpu=3, + train=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(300, 300), + img_norm_cfg=img_norm_cfg, + size_divisor=None, + flip_ratio=0.5, + with_mask=False, + with_crowd=False, + with_label=True, + test_mode=False, + extra_aug=dict( + photo_metric_distortion=dict( + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + expand=dict( + mean=img_norm_cfg['mean'], + to_rgb=img_norm_cfg['to_rgb'], + ratio_range=(1, 4)), + random_crop=dict( + min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3)), + resize_keep_ratio=False)), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(300, 300), + img_norm_cfg=img_norm_cfg, + size_divisor=None, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True, + resize_keep_ratio=False), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(300, 300), + img_norm_cfg=img_norm_cfg, + size_divisor=None, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True, + resize_keep_ratio=False)) +# optimizer +optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4) +optimizer_config = dict() +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[16, 22]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 24 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/ssd300_coco' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/ssd512_coco.py b/configs/ssd512_coco.py new file 
mode 100644 index 0000000..5824263 --- /dev/null +++ b/configs/ssd512_coco.py @@ -0,0 +1,131 @@ +# model settings +input_size = 512 +model = dict( + type='SingleStageDetector', + pretrained='open-mmlab://vgg16_caffe', + backbone=dict( + type='SSDVGG', + input_size=input_size, + depth=16, + with_last_pool=False, + ceil_mode=True, + out_indices=(3, 4), + out_feature_indices=(22, 34), + l2_norm_scale=20), + neck=None, + bbox_head=dict( + type='SSDHead', + input_size=input_size, + in_channels=(512, 1024, 512, 256, 256, 256, 256), + num_classes=81, + anchor_strides=(8, 16, 32, 64, 128, 256, 512), + basesize_ratio_range=(0.1, 0.9), + anchor_ratios=([2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]), + target_means=(.0, .0, .0, .0), + target_stds=(0.1, 0.1, 0.2, 0.2))) +cudnn_benchmark = True +train_cfg = dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0., + ignore_iof_thr=-1, + gt_max_assign_all=False), + smoothl1_beta=1., + allowed_border=-1, + pos_weight=-1, + neg_pos_ratio=3, + debug=False) +test_cfg = dict( + nms=dict(type='nms', iou_thr=0.45), + min_bbox_size=0, + score_thr=0.02, + max_per_img=200) +# model training and testing settings +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) +data = dict( + imgs_per_gpu=8, + workers_per_gpu=3, + train=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + img_scale=(512, 512), + img_norm_cfg=img_norm_cfg, + size_divisor=None, + flip_ratio=0.5, + with_mask=False, + with_crowd=False, + with_label=True, + test_mode=False, + extra_aug=dict( + photo_metric_distortion=dict( + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + expand=dict( + mean=img_norm_cfg['mean'], + to_rgb=img_norm_cfg['to_rgb'], + ratio_range=(1, 4)), + random_crop=dict( + min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3)), + resize_keep_ratio=False)), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(512, 512), + img_norm_cfg=img_norm_cfg, + size_divisor=None, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True, + resize_keep_ratio=False), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + img_scale=(512, 512), + img_norm_cfg=img_norm_cfg, + size_divisor=None, + flip_ratio=0, + with_mask=False, + with_label=False, + test_mode=True, + resize_keep_ratio=False)) +# optimizer +optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4) +optimizer_config = dict() +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[16, 22]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +total_epochs = 24 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/ssd512_coco' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/demo/coco_test_12510.jpg b/demo/coco_test_12510.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1271ae1d8a3e9b052d06e72f7b764887d171d96e GIT 
binary patch literal 183096 (base85-encoded JPEG data for demo/coco_test_12510.jpg omitted)

QLmnO%TayE3OPckeA7{z^bYh#;V;sixw|I0C@tcJoz3VSSm0-Y#96T5jm|AK zn(J7sG;A)gPzmE1?>Kv|C?+Q+u6KW0;&dP?i=}R~i@|EZY*$uwp(jEn+~iNcPx{PZMPz`G>1n;b?%Wen&!ZFBU;2+v4BZm5 zJo#tI)w6y{CY$YmrLi_h39N&q20mS~h<59jC_ex_EW^ltnCve~e7m3BVsh*tm)Vj- zuW+q*n`ZHhVSX{I`Q6LGa?y#2Gi9W>GBQ@jC`J%q33sv-w>%B6u~?YzL@WljRQ{UY z!jVKPyxC&nZ?8YX$hV4}Y|OilIr*KAxqG_nxrA?~a5b$K!G>~f$nYtU;kqC}vw1mE zL3B@RxMOZPDr3gOpJ&o8gd?M6lE08*fF+mZHbd&qZY6*1tuj{RM&{NOJ(*eDl; zx?2%t*z)>4bYT1ioZ(g#J>MEFegvDOkAGhts;=qIxex+ef*R4*AVx$i#X5owl%M~$ zd-$~yt*U-$f0qVR|9n*heQ*+qN)|9@|5a}0!nFR#<;Y(`rLBiGjg+OpMud1io|hjy z_5Byr&VV7y5TetiY05l$f~nqKl(j4se10&ZD}1}~HU!4lbG!?!)DI|8hE?9k3m_^T z7aJD+))$+WNKJ<^ZG|Xf+MBPxd1Y*+;#%wJ^X9@8-w-DMkmWxDSFU^GhGz50QPo>4%oR1P*nr8k?*hQWNo#AM!v++ zKejjGj->ltL|BL8ynWI&=c%HJlt;VFowq~7n-8v=^&j+25~aAw0GGQySI}J+$>MdM z&tFUV)UKD5h5aaM#(Rv8&&TcpQG!V@3ui@4339mY-LA_SI(*kx>d8Oipzcu`K4v%QZj6L$j3uhSgO)QtR0Z)rX-b8SU(~#sesMI)AMMt1vlm{tXh$1r^~REm)K8L z;GkLPB$WZOh|Hx$iMYhBx~y?IDDZ7K_x&|8NVA*RK(u{K$nkgu?fiw{aWV=Xm0d&< z#$#C@17*Mz^{oi`ci-obLGD1RgMe>lRGc`|eLX!PgR+52W#pGt-_+fpgFiouQ~+IC z2f|)r6jH(j!{*l2X29>&WX*ymaWoMBx-O=gp!A_0<&HU7w-hLEj_hH)xVpdeSIBbi zTm|dncmxgUHGG&0+B#@Sr`d>J37b`?IjR>E5P3CU(g5(n6n|!U(5}PVnZ_bW{XRO+ zKHF;*D|obc1Kj^$RpU^4`$3P%482~Xw4_L_?0V)@RK3J~O?vL>Y+r z&@9sv449guZ!TT&Ne@5VWFq=>h>HuyYN1xS1<_qGp5{Vkm4@CA0`&vDRckwb&CfNA zUNv?0%V;`zl5&gfNC>&uyD;Q(YFAx>&F3(GPmjT>K*v0#|T$W`wy4I^!EJI(3 zyX2x8rAlJ^jC@_7`CG(ucn{ezx1O@4!l~6SOINQAw4r)p5pgZppzbidSs*r0Agg0J z_U(LeS1Tiv32J>|?Y>Yz79c7MVeTu%X(&RZ>EsAXyVyXCS@jJV0c_>)x+=tuYDJC)@xc6~w9O@Z8~W8`uU`TykbO&+h^oA$AML+< zgi-J9*5B*9KKYWH9Sfr*8JaMcJyKPP`vGs*FzrR*Esz-MYSdb)JqfaU& z66;{yT1SIkUk;TRAO#oMr@IM?b*0^1GUi?BzdS{`RYvU2#7}UVGjiuJ46SP+&DjYO z8e>6Td$n4k`ok2 zLD;klW|x2kH8vEg>RhIY=NTSY*3DX zAxoZxYfnnICLQ7`LIcs?sUO`3WYV%uCD}5glsV$XYb8J-NLtCG8`cFpxO z4aAoj4olC79;c5H(s|ad$mHA((vroAm!NFh|Jtf!`HGIiF-$0f_;cz{gHr(%jJkhI;8jFMBKUsSmGSrG0UrZ9^?**B(KQ?Rd#!-up{O8WsBDOzWR^& zPahm|qlX3w+%i}^PGJCpnY^^g|0W3U{?Jj~$jT|M1@t|1{hvylPo z6K`YXuYV&tx;t>;x8!%ZJrcO7E2YBnO`rkw%Cd7G{k+@;BX$yizRdO%UAKu?u7?5h z+&rP94HKA^c#0W*SpPTaXCi{%Z{_jt*BZA{^za{n>*>R=J|(VR8F!4YMhr^%l-Lcw zYvDH*&&tu%y1d=pmLM|T7dBO>F|xZ7sS<)Bes*!%M;-fNFFuUu7KkmW0>7L;-$>!2 zfjbAw6=;D{K+PsK)y_)r7{i4Vhq)Ea<|(-X`B~%Kn>Sxi`nzj&WaZBW>Hpl-ufQE{ zHpsyY_&;R`tZ>&FCIEgF^QZS(q5n==f_61XV^cd>OtYoD63e*Kp9TajX(NB~BnZ{* zv=y#B(u@{^Nd!OR{yPlFRUddTFGopj(6j)lmlDXbX`RbGLm-0L^xK+)4cGm;UOGf* z23eZCTx0sJzEpGh=I#qv0X=C5T9E-w%Zh2-xRUbEour*xjuD|7%#Iym1eRG_mv?MO z>jEFPJs;`_U)s7f#}=VYkxubl+!z^S3}<_}Yj&&T?_9M4Vz{RRN4uX6j1;oYY{lZq zxG;BI$dIAo{+>UX&oUK`(*A^dWFGXrh~kzV6FLtYs#3XxxFc6E^lY|fy$%mQHPr3= z=l$cq;?E+J_*ScGT%Ufzke2w$P`SwO*%_1nx1$O`rWCp*z104uA;~O4QbfZkPLPjB88jl6N?*Z*>vubVx*TI1K zy7T%NrDI$b#KLuwv0cO^UHd`44w-$f>Fa4x<+;HwDej#65oB^lE$S3%a%k7V@^ETlb!k@HsX@2EnS4{iE>Ks0>~~2gA$_VUMwm`5vp90H_m{ za#Gt{k}RWvP{;I4#Zf)sIbgOZzldgtC7bt-h3aC-F{cm+D^zrp2?6t9ozSM2V)GY8 z%c)_ME+Ori=gpvIxycp#gpF-@ygGDv$fXX24jkZ+=r?sMe`nzA?hlW(P+NII z`!wx&s-B}RMVad;@zFpR7&aFKdV_)G_6FlMYFMQi@&Jllhx&fKd;F~)Ko?)z7an9c zB|0qcH_>XXBK;V~5Yu$Rh@|;+cuRtgYF>5|tO#IrN2e?q921`GeQCxFaPp5!u05^} z8+L4I4%`pj*v~$?LK6;eG@l+*-+;k#Si&0`>}($-5FL)XBDAB zb@>#ovb^x?yN30 za&v>Gi#7AeWN{knG#H>{-}Iw4DgWL7NIL7NCjY;UgP^FSASI`@5g>{JHIqowIZ9eLnB&eO<3hm9KP`;*Ys` zg#OFO@A(OHX90gb0feuP$6h^zylQ6JLKhC%7@Qpei5Kg)h%AZo&{33sY?Oay+^-0` znZyz%xqAql_)ylx)xOnG9;g&)Wb?lenouJ>6qwCbvu%o?gbnNt2sG@mSiYf-o)u@} zd2^&;C-Uw^F-1y=CQ(H<^Sd@h%+}OKk#0<)r+ekmY=X76Ltw>_LF~f;yqQ+00`4BL zmxz=gREHg+4)NU5y`9NY%RIB4j^IXj@$O}&l&e0ufnGL?kylghCE2@s*nVU*Lfx7D zoT|eJ%Gvtl=$x+xoM}VaW?M}#ol$a3WhC%-0zoJ2CGt=WHv>qNU=X-9;1$+ z3~^`Qk@}q=+*m9pmG@NP508XdF_fsbFZk&_>UpHp2_$Bjdn(MPf?$hN-v^_^eS4K2 
zaAOZ+PJl`qVf;s|VXs`{Cv>7yaiflwyH@{#?sUYzH1#rz>zpes(oAR0#u=@$c z0f!^T-~W`!Ov-rq?h3{rP>EyHUa^o$#bj|K{Uhq5dLajkf=UH1wc z^<^A8n$2erW=|>|bCvE}n3SGQo!95p>;6a78u9H9we55kL= zoFfCwtC_!b$AIIIVylh&-At4pA~n}O#te}q%*-e<;^)xPv0&Ln`9is zc>oPb>2FSD!8@X<5u;=(*riRx$n~wQ#GJt};pzba!{NxaXn%9vZLf+a175@f*tz#4 z@nRO^nbA&T&TeHo9mu1hcDZ?QtLg+=pp4CM-qu^dtxMdp|KN&XiyFm?dI6&>!I&hJabL(6pm{a$ znZbyt%jPHNp!M;>XwK}k|;HS*=HlYvViO}A*# z3Jm@7&5+V>vH_cgRCIJFLhl1Zi$mXOUsqH|=GY&(x{Yr= zbHRp}^(E6`V9_1?LDDyLEaSJWgW%l{Tnx>(L)N47@_IAH#(f<;iD|i~6zaN;iYL0o z$#|p(idBG>-zhE&kvd;C!0(N-L9N7rl(AE2?gE0K@NH*g9@0gT!7ssG$ZUf#^EcS~ zxPj)TAyPjP+H%=z>_uR3^Ml z&l&uYaq(1Q9A|xOZ=i5!AeVHOC_;YRezvewpG(?d|E3lvoJt_U?XI5p!-(dA$$njSk=J7nZj zVj`sDlqJ#mOdhCZE2Fo5GPtg+CiB%sw{{=-jMo2e9 zwT-@hqgR^YmzptFPXE+J0&}@mx-JZ3fnZ>GUemq9E50DAqmA+Yg1c`1r8ufW^lskM zIn8On9-D9zR8`VAeZV955Lc|Z0sf)G9IX9f__NaRCG{R0*H0;LG!1fG9Vg#z>HG6W za&#o9Q$+*KlB>%?S0C$hyZe~lDE7jvQEjGfPm+-@<59xaid^e{75C=Sc8wQ$FEfw+ z-b^yoBrp^39tdp!Iea{qX`fyygpxps<2T2rVy{qrS->R5x`5Y1B@k4=CpmpBUuxsx zYGUs1_UfWs1wP;~5_}*|e6;|vazA3iLOQvP z46_p3avKn-fnR=a3nUIhx;E7jS#8_Ua?XX)uky{W?oR zKA9i2(hF`y-~irtzb9juMceFmJbZPW>@|&KIQ?ovpwSIx~B6!MxU=5TX<@%7N7xUns!X>}R0${@Ll`V^>2vY3A`kE}Y2zis?gc$-zVEZoyw<7r6+dE>tw%r?8{ z=gaid^=Fa%m6S#c^>6ybhMuVgt!2v6^7;kFLp7C@J7^-N3?jyFl(3%r&p=?@%gYtr z8Oty`?D|dg_il752Vff64q?a$4-7(1Eew^ag5>(H1m-sETif(bWXUewBJQDAQaIR@ zTx@6wfq!qrBLJO%s?9GO@-a-35#bs6=rBRThJ34lUJwRzUomQLO>SWE^$c|MEm>N$ ztg1qi1YgxRK}XvvTe{!*G^6caQi3!#L`bPIrq!RA{a@sRq%J=ZJapja^85p@NLZiz@mjfQsJ zu;TklU2E_zY)ZW$qo=Ksr(>{kxdqiSCT8X4FO(y$j9>a?rZ&d$!qO>{sT5(ZxH3fN z?VDNTc`r3N;P+|{#pD_ESgFk(YrvZmNZiXDfmlNjJmeBCRCW> z=Q~c>GJlUe*o|W|tC0yk(23sAP!Q2tx1E{_a3{{^J?45kSpOEhNa$y(suluO274Z? zB?o{CGr1F%8Q=aX>8-2H;QbzOWPZbsue30D-V13eU z%cRv1-vas${w-%5Ke%zuv*BW`W)Ak0;rEWWy2mZOr{&~pqWynmc&qGS1N%HoibJm7 zZ6tzN3^t83vL3V2{-hCQ-cg1Xk2HZ}af{TkC3<8k`9T)W(rQ_K0urZT6&L!`&ksese17cCm53dEFL@1uSd7l!FXK=@^`j#u3A)e zbpzRkRo*qRTmQJRNy6!&dGTUxu3f1=mDVM>a0lzO%$(tG-e8&`NlDAGm?NkLcJPKf z2j5Gk+Dp2n(NCl#crz&+UJ+(6SKK6JU}kgA<|x1 z(HCV@GZl%WBK7Tg8IlCPtn7661?Uy;p}F-<*&6KQQ8rUxTt$}tS=bosa({GlQE16k z*@( zp5}|T-=6>aj9Hv`gw@AO9^Djf(!JVv#GAw_D$p0d6_O(Nt`u*FLI9nd*$kh&hST(Z zE?jij*=pTWmc`^I*-2hffWDHYiGZwV*MlJQ7oAVip45;CThnyb^114>nidvn8 zFMOAZ4sGTyA;>z^TF7~fB3GCaGdDb=IS5>;zj@$`^o>1%1KVU?}so@Vq{}9c~~57FfcBz(CaoSU>~eM zB*!eaGZC>hqF0MA!;$x-M>plOkS@OAX=q7%m{N9d%Cvb0eky3HHr*zVPWJ7;EDoUS zjCG&f)o16(-%*d+F$pzC$5IQiBvG&`>b}J7ctc|F6h+S}Qo8%UfJ3bbp^4XIEQ_L?aKS36mrYq!j)6HN}Y(ZXDwa^lh=;|I;v`8yASDBft}+sJ{(-?rHKUCh6+cqlVT@ z10T4C@@6?p^NyTjIV_8y4!ap&CaQ%)2QX4~XfWJqc5-=ORU7#L^sBY_%d!jSA5Q9; z2fD^Fd(qtqn*&M|d)16rHi+89xbOGP>B1qXq_Vv^iue)wl&49z*-!Z61rPK}>T&TgauH^|KA!c}? 
z9r^UKRH;?5UpWR!(!Of4Zp8m+H8CBoC;VqjxPgytELW^S7FPp`{8N5$Rfm!?i7t#4 zEF34>|A?XyvX4_<7Puhh0M9b5l@NG^65ivg^G^Fh6=4-bX95llN+gPWmOYWFc`Nx( zw6S?2@dpypIfDC@!btiP5K_6 z;}Th73flaBNKoIl0k7y`Cl8eE*v>!Nepn*r;)hvxF&kUKADrE`;}pIB%PX$!v$nT+ zJ$0-iXEsvPvyHD#v?;i+AgMZXp@(%(12He+W3=!1p09~Qv^lb^qr{Se)iyS>wQ(u} zY_^?BZR@;}&Y}#&TaRt>IM>@n+w2OL%>mYJ^l4^c*~)-_*pYF0j=;wjm6Y=oS|)m+zE;|>konvDhG@7uRA z#!AlyETKVb=r#cdkXYY%wuw9ZXXAeB{JCPd?WVi!b@=8)V}uMA^wkLGp61~*LC1XS93_2l(!Slq^0S4TJe5BrMp`}HLRiN2Jpo*7Ih|o zQ*Up#M7eZbm|h+Ut;-NwLD6FE!SY#5YbmjZhtVC6uybAET1T!*ME@>sp0?!}&73v~ za0MTz_QLpLzdV+BdH}lF(;Jb_np+&)uO?qtat?8BuDudGw%@!@%)1R$RL9xuGh#SW zI+@#qf9#spA)Oj;#HTz2PQcn_DcnwF#1sip_o&QT?R~m|eo+2dMLvtAFdgbr*8+y` z4b77WWiiW3=5O4;3%sF;@_&**k_CdQMQjbk2T89=)fI-h{heItyOiv)a5XWl3>F`( z^h=a$9k|s$o<*!xAR0q1OiJUrUXog*NPHIXAJJEBH$b6rknx%~?7jNkE%qWm<=AY`901)aVY_okR?q zy7go>OO?7w=S-5XXAFzi&onvG2Tx^G^@*4513>v8R|KTiW?-DY9pR!Z+BQOxrBND8@8K-vt& zSD{JGfu*hQ0g`)m+%J7}sUv_9@k8pYS9Sfn=|*b^*8I3^U--&{V1b48I{zY&_?K)S zk2|tvomUif%~7{yYW&Fwq&G5pEOnswyIQ3S(y%%39}z>)F``8mj*HniTF{><)V@}f zH$z`9`wVI%g*^D|YufQVV(>-@w~}-S(s4kK-sf&U!rdO7^3bXI={{UR29zEBWGA^! z1=3wg{T7h;gVRoGJ<2crYT$7F*s=aM$-`u%pXM5Z4~$_Bp)4hum8Y`T;IpFZqLX$l z85WGx&v+3^wr)RFqT^3dGF_O{>MT^?zB#V7H$ogBjhpuI@4rNt)@RS&o@QV!(oQZZ zdKWF#a$q3!`|;+s(GAbdp8__5xny1+JW7K2OkSCUP2T21aIEZ7tOM5x>nvsJ-u9J- zQtIqNOT$TkojwmZlsMvXR%Wg^ zr6}PBLAf24JgM6glRZbw?__O+Lj9jNGc}UdNZ7NAblci!gt6muio-&%1ZVVk+@rZq z4W4AK7J>*1m&;dl*5uvpel=sSYuf1U#W_E)OB}YTiy=C6V5ckA#;NPJS&6M6*dK$9 zJ~M5azkBdljCv{UPuho)T2oGEncGd(V^ga)g_zE$>5jzKcLM-U{Y!OfXtl^wdUgo&bTsVtk7cQTU z1SEMMKkS;xlqBy{H>o&hDr9s3bva)Mr}o1-rYI7JQB73)0Y|A`9<17EKq(I9E{HsK zJXg_v%J-qS;0yJ)ZymT%A9gYPDfZpCe$2VXgB`E_-Zn-#6x2CU3LVx|P!VHJlNvW8*r)Xkv+l#a_oQ_7IB3LR zXZ^z9yLxQ$Z;~iO^eBi{EVyUs1KGu{O@l+nW32mYu`{M}LrrkTP-$C3q~475k)yru z^24Zyig2xwR}Cg7)_-=x0prNv$td)ETzE)+aF3?BhCi^2l%enE>;5x5hoQV!R^?uE2I&;B zJ&?GNmOcW#$*Xy_xD%wIDz#Oyh#k3S|U3>=2bN!rJpLy`#e2rJjmVX-;j&kzxCbvyz3Gk6Tu!avCMp37D2V! zSvQGaK!lLWNu_~8G8-h`BNgP>gcg~q2`?TFQGQX>utVIlS_HLPg?io(16@Of>vK}^ zTBkaq{sG5qnR73kK_woVZ{Izs)zwqBoQv+m0xDh{g_Atpz>E6I$ol< zJd(KH^LeS=@s8x*19 z+h#uT<2XR$kt2ZWf49`kKs*F*$vWIU%OXMKsMf_=Hj#&!A2a!};yM+KOlNE!fS6I# zQSe7+(I_2uN(p5Fy?cd)}s2~~N zXo|IY|B74c7D_Yii`{3}gG6EGAf;XlFy0?sn*?&;PGSD`$RO`7SrTT0eKTPV#J?J< z#AM+jYT*QDJaiV(Yq03?B_85`TAMtqL5hhC-kogSS^M{O;CiR_0${Ox$(df=YjIub zLo=3Wc^)GQ6mJ0E*>6}^X9BxIwl+1AcBJ2X+iR>P_LE0_qK)Du^V7F*zO(Ev|Dw4d zZ6QMpGPiyAI2x$;5XD>Bj>-O3j{TSY8AV&{Gu*9kA+9S0v|KH zyrbf4&H<>QPO$jPmEQLE7))$ex%)v*cZ)-vrsofq5m`IN?4 zZ#$$oNfNtE{bz+>!h-p~59)8oZR9PnxZLLgw+w>*sHALHD8sVP81;^~LK|ZDGO(M< z(*PZBUy&XkFsGmQRD4Bsbr1tJG483c?-nAq?d1BbF(zJ+{2$S3-AWzx$2RFzHkPvw z*s`dTWTiI+eMiXM^AL~B#$)U7>U-QNn)76bX4)QJn;Sguce5|cbgxS#!pRI8J!yj8 zpSn$=9vw{SG&aMt%~XU=lA~MW{#Cztmqh#|;Wv{4K+07)d((!?+B3#DchD2$TRILFUC0j#gjIEBk6%LXt(Mj#>r78Lu;dYL!8f~a)&allZ`k>*ECRok( z0D8|{!ST%Z`eUaXzfDR@e?uujWbySj5t+UGjzt(iKU#4dD(OKKLFwBhEZ`NJ+g`&60>uYWl9hV-z*~t{=M@N z0{Ml923J2x|MRpm7iDogFjxJ&P9S13w)VEvZY2N#)X^vc5CzocY-q ztqVKXC?T-Y08|`at+?-1@bHE$41(u(bSTFkth~z7m~(rD49k{kFWX7eb~)B|6XI;D z$u#?T{2dC4jzUD8bSRMTmYG+IY! 
zMH8n6|M8VwhA_x(=*8uB%ct~diZlU{^zj4L3(56Bj#}whN1(J*IBw;Bn;~~pV}!_* zus@~H{`<^f^fM3iF$LAm`}hQ|9Am_DyAQZhop8!4bx5~-m<|0LVqfJ<2kX)P(>X#0 zuU>|4b5?wcW6u#Plw7Q_w6vUe-mz;)Hw+C8LOZ4)88gDB-y0wQFhoJ_dw?8^T=N-c zg(X>Bt9T9O-1oEl?J2MaUxEz);KG5*hr5qgAB={TG1%57ZARDBRp@B;N53yXCK^?O zjepL{Tb2F8ABc9X%fiXti+wT5^}@%rXh{{u?S@v_H6R^hf)G8A6jQIXf?5YUq*}DE zMr@c}pj!czZbyPeEc3&!bxp$(TBWMd<;EH;ACbGW*tvK~i2}Vz|GO$un;L7-O|e|4 zW;!VY2*XR7-!KG5Y z!HD`IEzQn>t@l~Be3rCmwb1wURbk!L?>7kyU5m;?_lchPP%mQH%_B#GFC`5qS zv#mdVeEGsCW9`As-|m}Jo(#-FtAu%*-mm(1vVcAGu0T?$Nq}2Dlqx4x-Y8Nu>8zT+ zH-aBW-3MW?+=WLn33-@`U8gb78#DKOXzCi;+)eNLbuDG}Qr*dM!hVJ;DcHq)J&Y*a z&bD=`TI3G@Zg8`o#-*{Ebf5w)b$p1Wp(Ox+9+e|cjOTHq|8zTUIJp@R6?Ry{l#KBBm>!aTKgu=8~gVL^B6^7=F_zzC&8jwzrJ7B_u3#1J(4^lCqK& zuMOw_v3$B}aNqNx_lmkbMC1n~x(xGG&b(fqLXf-wB>P6*`R4vi8{kNZPbqkkzcH>i zSLL_X;p~@31OFzl--e^@>yQ#Cr_6XYf8@>=f|)4`m%Fg(D68HE%I83Iesu8@EJANCca%0_N<|8 zrp@Yyt2H--cDQakjccS413@z995iY!wki{=zno69#Q!`pNf_?&`L@s?IL;}{+~X6` zuwY6j2W^#ie*3O{*k%8$6@t}WKxf6?(1g96#9+_kB-ykWlj!IV(r@V~G`rn7$}N3h zNg-h`mL79q_P&krtUo@2W(P<)@q9!xH-O27$@D;srVJzQpoyWXoj>I}%yWEI@alU8 zU6_QEB}Vo1-Vnm_1Dwv{>`QrO4+s56vJZa{a%=60^Oj@X!>LPddOK?v`#WQnX#kwD z9YGVvo`ueFC)=}m`}fqz9p$Rpk{`6jIr3hNvFWQ6HI_;|8&~rM0~YP6)?PN2`i}@^ zpg@T9FUeoU)~2uNR@QA@CmyxzmaRqv5}4+IYhEgg^RnVhn{gnig#&V>z5bC|v)MdJ zh7EUdy#3n^v7cowm9zu*RYE=Y46(46KxEB>;{k9^(VnDN<2`mlg{gz|tfbPrL%nJw zCSu{=&b8_EWRi`I1I4v-p`yr}1xVchfleI7h+tWvz8MSXkZ;C)nE>mQ;Xj#A_qGH= zkAE({9K1iowQRvw=432jRzpS)Oo5~e_&H!L&A!>wc_+*fTiMq#1`kJvR*rNsbXS7^ zbvrbZbbSW1gvRqYINdR;U2Dl8hZjVcxw-wOde}ZM%iHGZ;96a=Rpq|C?6}KNa?f0o z3CN*CV$)Twn^6kvq4MqauRy9LrCv39ICc;aPC^uE)y10WP%mP=?du`XN_;`-Mkaf-ohG9DK8HLJx zdh0)%{6f2oYwtbAK)pZqS>ai*uITD?lzbkKf?kjC-DWX?#uYk0*&>1AJB#W8ULy^< zzGJBE@xNX$TCaY+KL3Z_`pH~B)dYu-BdEFbpWsvZZ5%bFHl1TabOSKwU6`K`3Ygzc zpDJi=V6NoC70+L4KlfNLHtVY~(2koRA^MNVui|}b*=wj8S$vx_wiC^XR0pzy{XVT> z*E*+Gb)H@1JTH1Qg5LZ_#UbtxJ%|m&zS0hVgb(bGAcn{P+K2arVk#vZa@9y@TJ*s;2@>U5t9@8Y2TF%2XN)9VyAPZL9Wx=?S=I=6iqv=ZrU zIn8HRKBO=es1wp;P5=rd^i;0xQSv@*R!RUY`lxOj%mF+r08 zh2LTY&+F2<1~1P0Zb0~Wv>v?;{vGmil1+^A{dSe4l{i9#*pxM4pj{ zvdQbuR^{(yBuE&)7uOi;e~F;_qbbYDHP`PD>`0E|7WgTF{jz8Z`l*Sk`g6f|C~FIK z9x4fZE~~j@RXVphgR^`N_?D--S1YEgo7W8t5S9ErKXiTa+4*#amKP@jcf++h9!(PN zp~9s&$b;;<1Fd;yO1}orA*W-npRRteRHtPWeN{;y9$PHup8r;P9C$;1!F_?_?Fl+n zqBe8(QK)X}8PH30v3*}-nXkLSnv^1HQ=VlCp$5@aNPW%DkGJ!=tDWQP?@3xJ^?0=+ zO?Q?REA{&nhA3DVKo6uUa+*msV}u`Cx_U1dx)Xu6qXAM{s_O(Tn; z1HR&-=RrBo2gTx6JIbO;bam?uzC52ws@mJN@>R>V%08YexBn*fh*deN?M4biefHwtJC+FGj9cQ8cK|&E+0W@Wu>S;fx?I=VWU6a_>$b6SOG(0EB#f zyQ?XXWFG?fYwCvRJXO;_{LEZd8glX^DPq(wMbwn<)VTrtN#sT9{K*33%Q}2VYKIXn zo4#wkN%;<28S`7IEfxDL91wUEoh}NN7wMC`Ar-+khqgDnF;{Q?Bl_;9be=5yXzMH_ z5K%y;@Pmur*VdO`l|uQ>aj4(kqIaekgW?4c`LMkHaZ(B-49}Vl!p8USLfs@FJ6KZN zkmElNbzK*@1>PN{M7gLI$5;>>$`#MRGGcW*-^e`k#qgBWU5DLngK+7O*%w7;X$AXo z`%yw|*Pq?(J}6!V4k+e@p25W9+M_)<3`^d(m&}bBHluJ#(>1It`(FDpS0+yg_xtP_iK-J0 zs+z)ZL4&F*!o4-n5#&C&ZE*=le2oy;6~SlnE71o*r(XX~=xflqGyGVRd~%$7Z0hN6 zGdrQ03jN-+d>@^V@x3eIxCqib6$5#)M}<>%fE#_BQN%n-?)(Fwrpj21aG z4HaZq*#9AjcRXY5z?Q{5#o2uNkH~&sZOS0tmy!@-!X5WnjbwP!mZU?OnQ6dd5}@YS zNAa(#W9_UR$hhuRedyY$>S4uC7Cw2g1 zECg{aSYQwRfI-b%m-9XB^(U8^w&Rtdv8T(WrIumOxG2$O=rPWcSmz;}6Fede`TQLC z;TM#iQU4J!!!|P!X87m7e*O9r=pr>+qqde?chB+9i_g^D@gy~)IwcXKz|&J5SJQ|W zMQtoGFAIUxzaKtcrFq$*D>nZhQQ+by15ae&*<2HHwuN%0sRwG>yURjhSMj3xd^11X z*H}10{$?JD)N~GZ8G}+UlNZqIcCArg1`nU~-%(3;k3i$4^j^Oz#n(VwA^MCQ+= zmo?V@I{;nF&Hf6;qlHKs96I4P|Lh}DsK z?6$9|^|R}$!%fZVh$l{>^-rP<#76juG-O!dG*rZ#pfsaAB^Q<<>)@LwLDnzVP{mUe z(aMi;E#h0@zTK6Uf}ax3gKrEV8w@=ski`Fp#48(t@3h(NKRskQbH|6eaWQ3Y?FQ=? 
zC8g754w4qM9$}V$X#zJ{-zmut(B&|0yg~IrokzOO3?ha!gO}kSh&WQucPA+LEq7b%Mr|o$O7A9d=qkjP zxKptf(O2dGLfiU{ok1a+Furawf)&!u=@G#a6o*gjqQ)+$bmY?tan|E_s&TI$re#I1 zKId5?Fc=u64L*K!(yE8&o$KFzXUsvhXMJTx`i?0JeEj(ZYC`VkKvrUzmthcm{$(jCBKjc}F$rrsUhZQ+?4MsAMsT>8e-6$Z-HH-~_|lKhXtz`iU- z0X3x5l0-!PnpjBPBhQr|eZDudT97QY?~X}j#zSvJ{if!*mk6PL*S#Au5dSA;T`~)U zUCgItpSnk4>w?fCO)k)+iecfbOdTa>W`a_yh;n&RY-nO)<=YQBBo z7JL=TX+3n*72_Gp8N=-e6|JmK@y@?@{{<+Vy1d_IbbOC@)k$dYpa{oa2vfEHdl3SO z1HBQ@mwTxckn(QIJfWP6Fa3kq9Wwf<9VIf!)@dN8emlte$SO%+2gAQVwh|-+)-YMe?W>f4fpOg?r$dU|9qKPqurny_lw`nA%H+iatNgNKOzdGO9yXrb4`{}jkI1c zYUFXP=QnA-rjO=Kw}QzOj=Ny;_!8X8Ss0e150KN}Uv)j2Xin^xOJ4$6KX*@+x6+u@ zD0ItJV;-T&iMPba{Ji0~ZVbCu=yGOJ9m~+6n(@bwJHHUezW+Gg?#|sESYZk70?0 zgC%HZpS$k9T|kGG^8MPF3$7@kd#t|adSvav>bloJOmie090^jqyLQJNYpT^t^%K;|ry2#XLWJf@?36HIIEepI&VSAr^O0qJ*U2nYp98xe z)F4=t_ecX~v8>i$*Z5k)?ydN^z8ZD8!tUuNnlZKGNsu9$s-Y~%uD54)()BBMsQHsf zBS(e_p`+>)-xTqbnd7hDMsfM%rX>{WH@OH7YIDOP^eosuONyMe`lO-qt>paflwB)s z6y9$`9pp0rfpsn}DDnYD+?c=4Ujt(!0n0Ygn@9zBp4f7cB>;Ds~;wuR%vmwh+TPEdn<* zCtW}0FQW4mwIB7=(@w7;IH5>=pnH~)hiW?v#RG7jnOP8Qi8)s{V*E-&E?$ZpIJ z&{WzrC@sT)E5cOEtG5zpbxuTMq>x8ppIt9{ln*H`(@h z^Q4jV=gZ`WDcpgJB1(auQq)d&&t3Dg#Q(M-SoEk#*I?na6K+`}gatWOd|q{2qg#~y zrs$v^=4y1l!fz#US!lweKz~y5c7n4AmqQHE_y;fYID{!#iHjHuJ>#3_P%#4)zD8v- zwz!IvBD&AwcNd0;r16tJy}ZqlnN%ebi^c_horC*fI=J+K2l!0S5rU~l)*}`&wWgO&HT=m@e z#1@==#t!UHL2v+{!Z8v=UoyXAO@R%J^l0Fqb@A;kE-Cu=!g>^KpY8uZma06hPIL2x zw*Z93iG#AfirS!VM(`1m`N^cvk5<)bhE=Ue=kuKP+nP245A~VYewqKKTAYC__#91X zU4L59YR|B5Y@MmE0?9Os@e;xYhrh2yK71uQciYHsG;CR}@f|)ty=PeD3n%xV>AdOf zF@r>HZAsD=ZPWLun;w#d@nKfcu#6-945g*z8Sc?X)2>F@&WhIHH*W&!+c&0*?41a5 zQ;IA>t!LzTt1E>dJEI!>8#HySH4jb&58XwN@4Z?UZo<8p=Bdr6R1=ZN)0Hvvl? z?=hzxzJ;;(`neutdgi0lNY)2I0-fRY>igU1Z;Foz_SmJ^$SQ&%#%4a?@qGSQhtkcYMd7`8_(vnNWsvFV6PcclD+kGI@V2rsz37#U!~4WHwZ# zIpB^6@*wTjakT$O&dB%bVhRmCNe>@?8Vfo*?ejMn!N(XRTq3t8tzE7>t5&kXE(Ngy zMG5Gfpi}tK_z%~{gYyVyr;MrV(dhj=M^8=Sq_+x6HW`FZiG^eagNa^V6f4& zEj-;I*6;{ny`=2i@x-I?aNTn7hdR4H6f~G`jte~t#3@C$Nn<`@kI(*0xA~U^wOMSl za{+|KB5k(*#ku| zJi6v$&68!2kDi@qbR2;vHG{J#?$^N&(pm3`HivokX36bp^Aw<4;jJ@;?fD1i7EL*0 zP0g7vs?wj3@H!=Ve>}2dbDiveZ)vK2?ln-xzl0PF>}Z5IA0t=TTeyDYg<(=K2kIip zhoR*|t}tuUvx52sOr445i)*gfo)rqf>V^cL`Qy7+fqvb+%z`^Wmckd&!8KhSCR@2Z zY1NVDWpW^sv5!uhJtMC$yd@o9l%VR%*Q*FR0#Kua>yS9d# zYEOZhgwDhU=~e$s0KPI%SbwKj45Iw@jVZi+T2R<-T0p2o^T*PvxW>d116+Ok)=z)d@5SiF;4G=uGFgc;AhO`=A#KhsCO%ND=U|%sTzeU? z^=9AK74zP(ysT{CgF-`9z1XF$W8DaLm@2z|MhiCdP{mG`w0&pN+c#H3%y`u)2c)8B zYcnp%!=voZ_|3hv)f{GY>oMrn9`w##K-hQm8%8Ox>dHl)7k|(`lpTb|;2Y^Lz zp4b}jB!&%(`jh)kzrZV1z-nX2AKZUf{_8zwX09C*?3a;Ak{wf@v%`-hlO<{hb^0}! zQa2i;`$X;kKb;g!vZJq^R~1e>X83kR<`lwJrI?wuwNIQp-)8afbHq7+$(sFJTG2&6 zUeU#yu82pii9=0Ivoo(Z)TEY0@5NjuOX|vksKnW}mqeGSYbdX||cU@T`F^o3(K1Mep6jrNqU<(?7Qr=N^kBm%j_YS~XOJG>XGiof;+PTG3F+eNAJ8j@NZ0S?j6U*LmaJa-eAv?QA?aXEfi^6I0=1A zeB^-6v~d!F!qfpW36tcQW)ZML)xrCURQ={bgY`$LLpH->I9W%^1 z^!5PTkqr9@2J6~ewjt+6LK1zY{|-V_+G3)ENCRA|ArQdjvo?z9k!5A^?(Vj!w;+_J z=1D3=-SE$PJn5J{`H$$XwVpe5maD>Cte|}mBX?t=)EexM=D_RRYx@}x9gd{eG&z(p zewMx=ZU1q~3?bSoMTITxCrr3)E$MiV=#l}0VvKv5Dr&JXtiAt06=)&q-!Ed*70;J5 z;VwYfJj4fTW^eXhI<$Y1TwmOC?ELUdJzw2wbmJ{sj^>FhDOSgtgY5IFYp9DvAv6Eg zu+Yw>Liqk{>WGS)G{w3ZNDHPy&NMx@{00c-!NixG2=&buzzy;C{RtZc%{JM9rf>P; z;=G2ShoSY+t*0Z&#S&DO_UXwZvmLJ288Dc{I1=0qnWtuDq(k1V9WEvUlcpt|{JQCk3hMBebiEKB zTS?neA&xi9YB~-77I-$_I%|*L{V5#h|C}A&zx~!ZAS>yDhOg2wIr_z88qvSFA+1bKLI6wW5qO%Tb^6jJe2q|en7&R1-?#>a? 
z5+dE5(lxrI8w3<7rMtVkK|+wO0n&^d`Fo%DUv^zwjP3c}_kHejKF37l(fA2a+#G@u zatRwbsf`{f5b?k4y_-K(e6k(0)LrUt+Vz$q8#^RvhA@Ng$aUs}@E;)$_EKApoc+yR zWWV`F8YU&y`>5}grrCaxr!!cuxQ!HJp;7J7Z~1B`PxIWXjD70<)q0@Svu&tpK~_BS zIQ3LpG|$m(dqzE8KE1aJK$LX+88aXO=^Ej+8%cphUA2I4qp%LEA_z8q8z$ME$%GmA zJ=;9hX{Y*|0R97x~l#Bqqp(5v`AGtA5VI%m?f8=Mch3F$G~+{6c+ z4F)g*`p&{^7wusXR)>u97L*tkv%DszQL^C6(Ec^^lMSVlx2ooe`cK#N9n$^T3FfUL z1fD=MkiJJf{P=b2c;@CQ?RLN*f~fN3A$4j#Z8f*o1e&JzK$d9mEWz&lgNk@YZFo_5 z5Z2k?GRy&|vLOz?fe1)x;IDP@Y zi8N`Ai)&JL^vlZP)NpbA>zZvbP~$iEA80h=j%s7+n!|S>J?EV;&$3Eb@7l|(2RG(s zU-_sC!p6O?=W(cdCtM&px-9b+YP2na;k(cIp-?hd*^w?UXIoZy7N4;$Eqhv?(*IGGQMEtSi;?I7&t z3^}i$Apr_E$qU0Veio%t@{T3P`J|swVvePu)F=#bfxyE6m~I2zUZiti<5d@?T^qei zU){1HV`#qfhO^0bb1+X5kNc7CvNN#Zk+QoT%V9&p;V%Z7>5SW2^<*seg)Cj%R%>Zf zH^?!Aj;p5kgsJL5u^d}9~)y0Vdl z460(Kt@G{m=}npq8x9AnkGFVRi?2WaDrNrY)z-~#dE&$G@$1hCL*vbyOa^ie(ZBgV zb7s-;k-Ok9xv}|2$&pCYcOE{~g2Z*PNwAjgA2l$JD8eKymyEBpp*uK`be7R&+8Oa` zkVur1MQQ0@aPrA8>HQ7>aWN*25aJgP8h+-k=?edTlHL#WkrRUz#xYXBP8)7{TPCPr;9tF87fqTZlb##i?nE21)om=T&*rMvdbDJ!SREtDO!wRZ`%)btdTT zxX$eOws*!}`*}j5fNyePKz3JaGpzRRlj_erRqU2?qj`Di&z{2?5}_Vny`HaqXSYtN zgDx@UfeEPrpj_lO4sQwm1O3J$cq9UT_BB^x{6W|lwy2le=NmcP9iwi5l%5?{ZT|5A zaeY#;7@_<=d$$#~3NB=boLX<8cG$~BGJOsr>9*=7M9M28O&+8l)nXP6<~XZ3F$#B{ zq<5Eft)t0sFYxjmE-;N7Z*}@xD)=EZu%~Mk^JiRG`U0zXrkiT*K&E3GORJuRW3TkS zeI39u-*@x}cvLTHejl?rKU1u&91(8NH~G%KBJG$VS-$x!SP|7#OsUsKwG)RhM+Qai zgnv!P2b_&Ze>naFT_698#CJJsRNS-;+KdtJJ{r9s>23@Jr(%CaT<;NxHz_r>(7Sg>d?{>1Ym( z*_^8ugZ#ADRx7gKHW;@PxJ$pytel@|!ZJ%s(ZI*TJbdA#hIewkTDPM3D*7FVSTnBb zwn6Q|q|m>%dS(pb)*;>}_Y)HpWrh)370u!=1qBsznx#d#a+yV$4*4qltDBJoa83KN zujd{X^rxBmihK)`{EB>wa=@!x75=X_pJ)@0C-JBLhJC%z`kGiQrad7Nf8AOA_oUGw zEjW%(Q7lB)FK%;gD7ti?m~^fM6lHA(qs!4BJ#r((K|%4wk?C=7K~nPZD0mgCS@EDWsjB zS|tWTmaywelGzG0g0<9;94^@P=GAQ>g=g8DjN{|+xxiB%VX><-nNku7`M(ns`B$qV zmOFfrAH~vKkK!PRO6}PJd&}n$7gTr#E^6G4@bCx3hx8#Pv&;SF1irQS2}STu*j=KT z-Z-fo1u>&o)FlG;! zp2Nh>?UJbl>uZ8zq)#>Z(6J=L zb<@T?|Mo}nOXv%ii#BwD0~^Sck41deB9+KAZ$NeNOswyV$}?*smnF6Y)v)c<@V(2p1(m`?mQYaIaeiEd8IbXe(g+v)OC2xBaJuC@F;L0^X?+tKRVmSbCb&@ zD=Z7dYvSXN^Az8O_dHEQmoeClF9Eho&+KSY`|L?x=~S2|&&v6AQRkA#i2Bi>rerEw{JvD?Xob@eY_lr&IF33G_=`sXX`lHM_r-23^R{SP0#U>?^-a zS6cHa``vz_sH|qtErY-urollJ2YW_~3GQ^`ZX9?o+&bef+%6>2&5_2KNhzk+` zrzleG(`-W!L9Z9*#%Jk;L-zouz-gz^R!upOGk0_St6)mbMbP45^N{n>;wIU!sHk#c zg2~ImKeTg<)-^5di_ULS{u0W(yRu?%ZHG_wttgieQ<5KG^*tM_kT9aDdl&t#2CGb^*f3DD_v^C78uwc{h!uqik#WPLMCq z_EGTZTOg3hhY)&-Q*OnF$M4=`nma01oyoTGO_EJ{lhRrxUuJotm7+Fxi9PphGz=d< zZOR}ZgI#0^5Arc8nQ7}OlR`E0xO_dhQKMtgfqTcOQdZLkm?1!vl>lJrUy;2w!3%s1 z%ayO6FGx`SSJ~lL*2lrHJc5l1$1o*8zYqk(w_8Y^Nw0G813`>GCQV+&N+z|MQER)+ zo9N-QQv>imEh3~LPCGV=tRAp7m24pYt^5a45^_MC5e8UaxsCckMH+;2CsoH0P>PN> zfGd_5_Asyzh<|P!FY&WWiDwL0u>`zsQdZYSw@%dM z*@%WI6_z6Y4bsOqDh`;9?ttlWE+JneULtcc<45PjNVkpZdh}+;`!B|BL8zk?q2ET$ z^am!)b~EE$xpVsGrfJM*5M*vg;p+dg(b zeTP~S5c?Y<*K8nzXDEl6HW3X0;JqB9PYUU)%EGV#fiZFd0mliWI)$w|aweG@=*f%o zl*OSOiO7K*NLS)y&N5k7=&eeJhk1S|4VD3DxPd5*Hf}#hBBbFQ^ULmB=AkkvQ@*>> z?Z-P)IIfdLu%x(%X-@DZy)R>km9)nSo`78RD>lY2BaseF5T-Xxk7Xj&V0812vcDbU zl$kSwWlmp8`znO1RlPrXthIVB&26ag9r`y}eTsHpXY1Q5&? 
z1YM+YUWnr~z#XWUV1b}z`{>sfm;I9=78AzURa7lE+=pqwsyQh-Rx z?_vvp`utSwi@3|cg^x)oTQg8`9*vaSH3H+aZJm1hlKzLft3upN!6anMPTScta0pr+pWROVr`Mq;D|o zdGmdomF_Ne{9BLop(a|pTaymDV55=4@?r!V)ymqDoBZx;7svWn%!gRkZ={4UlRBq1 zHMK|)W(o4T6%1_0RkixVeZ($g@jY2#@g0_FAoovf)^4|hjsJmMfNj9VJLM;{B0wy1 z(Aww}!3&sTWHmbvQkt!L2G%99c6tEH%lG%g$J4M8-KGR%rX3DcZup3soT~{R;6$+K zP@@R}Y(^G+CJwfz{6XZ{kKZcM6@D)-O)092_%9N<2tg_Xd;+#%>}-`A8ucB{&8xAF z-PV`Hj6~zyK6;@Q+@v9kKG1+Pe6-3y-~azc3Hs_m0xB4!M*=+8J<~vqae8yTS^Jeh z;?y&a8H;j{aGBNA;>lXXyk=aYQzyzw1R1R&$K_?8xlT$~&7$`HvAMl7`Jx;+4WQbTQiKTu2 z@IJ?2oJRLai(uQ~q?Wu(YgFav4-m#U^xi>K0zKmQ^$Q**82^lE7W3L_n@}kO#I@IH zxk}u|0t-r^g<6XpqNGm^b^62KD|Xr9`X$^_+c6Q-M@QNP3p)wQKIVy{b*-2Q13@ei zM@AHmhhm3Q!u*VX(<0)Hp6@wrlPqdyFy&);w^R!h22pDi&MI9T}fs-D7$oDGDDmwXAUU+~gUlM;&Joy(`6E z(K%0V<)b3rR#%R*jI$)>JykJchP`C@V4*s`81nnLzq-?rnmgR8@t0O zr1BsYcVvhd}krWwh5No6#CS6shywWxkS`}I?}Q3|0NrYohU06rEY zN-JIE`>*Ulzl(#5pZMHw&BuC!P4M*$TGi>++=_R_g;y0NkpFzM&L4uv?Qh(^y+ynU zU88hcSus4;G{%rLwA^8wOAuXHTMXk1<5@K^YS0iNV959wn?7xL4elksroG{p+wW<^ zuPSwQE?fSfCq2k>$T8OX{0=J)8&~LMPJzw4UsD8Yj=r{#rPF(ZGuqf4n}n;=x5!u@ z3Rx!<`3{@DeIc-G&kp$&ZDzaKgY2VpgLC6D@{R*Vt=%8^23%lJ3X_kcO$oynjBa~7 zc@DWrjG5+UwWIi?d3iZ$;nZVT`{JtJ0az7)!D?w}eNT~iZZOl;OUnmb36CoQQ>Cob zXWXDZhd#u+dA!ePLF@{txN}O zV?VWmH*4?`B85i9;`aGRVT(5gh=@YL9|X~RA-^QcfT>3Kf=RD+)K-vbcyd1Vko6b& z>KOFmR;~LAGh#s2Hqjf}&{+ynY}cn@zbc zi%E~d0=I6c?{e@I-_cTK_l%q@V&+{_tFrD>>)e;8iX( z*0-f}ISM1gPHE%zL0A&v2RxMJoH(|NdT$7F1MIDE$Uy@_@ar8$az?Ssnd=HGwU`2{ zBM1^h>|2amTkIXgN)}q#ieYgidtGH=z|w>*4@6*ueFNHL#Ox5O`dpX!0#2^}pR@T9!&g z6K@3z9prqk3+lVAeY?Id$2~+uE_QEe$pQ}vE}<-z$?0^H<)>fl#A?53d;IRy^>%qM zSAP;=bK~0B?m&dd8GPq~u~rlVXW=4WHzaPalj`jOi@CXTWFQE(3^RTkjy}AH1qz|p z`pgWMl1s+x=}qa{grld?p=IqSo4g$Xh!Y%a@Gb}e-%4px2>bB@Pdb~9SVWqfFG{e) znK*V8;}AvA(4;kGEY@a2Z3II99qdXV{+IUVCgq2gQkt&rK+{GEVCxOsSvn=*)p_Wjv_>rOh>PmTq6`I%!MQObR z^3k{yReUZ?>hk>aj9zDIe@M>u>B`Vq@sC=f9MUFPwi5CZN=qRYy@;K-h|f3tC)%YM z^Ld*aR{8a_8*ukRq4CV&1A*GDT+$=;rymBL-EG|G@jwaR5EnUWKc=l%@_bHSh#pxN ze!^smaOJg-<#M0E^Q8CZDvUkue9j5bZuH2A5N2+)2WLz-(UsR~=2Wv+9j!V>!FjIA zV2XMWt_K}y1Jr&aZrM!P&o4k#s5zLz(R?>9l6>>52Rtq9OOpmyCdg>^8$VNfj5wN0 zz5RIy49gon{j-619@t5hzv7$lo%av?3qNsQphv8ch z(Pu|+WN~j_XMj;6q{CXOW`)5Z+~Ah&uJ;hY2mI#6Xp6xeF`s!U#j4U6S(B9Ys;~PZ zG`eg#@uVDaq#nB2i#3@*I>Oz`-W0-Sc7@3^0=UoV^ZN%a5e zFhMx@c0Pxd81iXEt1mD26PJSTBGx+Gq{d0%rZ(jJjn(mijIHSgcqWnvtA?l@qG3Cn zpFthe+0-(GlFOguze;UC;$)0?4tYj1taDU{m^hC$*DUC5d3uEQ_mGPbP|*vGrK0Gtq_X7FfxzW2lS z<{$rGIK%q~OaCiS{flv@;m_69`@($mfIaIdnr6(_b7`K7N1o)O4AL%-JH&vS|6x`^Pd$%W0D^6R8JAGRC&O}w!=4yjx4HAzZU zdc~Uu*|PRh=GL#Fym4V!o>%|E+ey$N2KTCM zX=Bw9v4#guQ*j)QfrIw3JZX%eww8X=&|+FoD0xt?vt?6qAO*csfh?ES05;lawq$LIQG*Z0Fn5(!W}u*a zjf_}-ptoLxn>H(8x^rEWe&!g(tNBCp7fQCjHF9hCYd)};+d$Gcc*Uq zZecynO~m^GZj-WfFikF@qROXPCL}^B(8V!XwJz^`1LgDi3TQcBpJ~sjd)_%mE+*ri zuooZIn-a~Vh<7~3(FlIZYdqI3B+It$Vrj%onzeN?uHtSl6Jwvlq;wN!mHj~ADJoyV zDyll*LpUThfA>R}^&(jDV?yx?sd#Q2T?2IK64~~?QIT|TRt=su*8G-B)2SD|SuI(W z@@L`DJmo^_!>J~ZqVfq<^+%i^tvT&3U9UoIG$-nAY+GZ{3S~x167TX_Uc?ThJ)woJI zT5Z&e5@t}AeeQbZ5wyLL>B-;8+E*q~`abS27Rrm)FvuUo7m}$o=sbi21z$K}^yYRZ zugYwPYbaeC`hO)mT(NoYCM>UA<#YWUwqvkC293OO4l`zHoU;(782b6kScR&esw5d* z0bd@S>FAi(dZ&+uE7af{W`BU|Jh8di*7sQ2m4+AQih+`!!^zo4Xk8Bw{+0lAmrOZd zzacr8o@NlTe)Z;HFg@9@@uKs4mrR9jgn8cI#Ll}1R!<)tad zOinS{#6O+1bspt$GqE}s6U^(|98AZpjhMqrqg5pDrx>pGomVhv2lq5gA#1tMO)wPK z;ca3(%t7^KgQxnBe=^KNe_nw}!tdOshWNEW#BM#WmoY%V%H0hR$_U=prpo?F;Sn5X zsLWh_%OQOl#DGXq=z(zKB@emg~PSyLqDsB}f)F8?*|z;l9LgP!}tntP^xF>H!S z^ZXAP?_l#0Cz5H>v>>qt5|)kvRe_&VtA?rwBlK1$TzLSRkA5TdmT zE=x7C^VR1lFX_0c84yrCbD2ms8dKqEdLu%(G1mm)t3N!;ck5EeO}(90&Z5rHy8>rfh#X@$$0j$h4{KA;PbF@ 
zJyM*uJ{oIf|POCV6Azl})yYn=7E8`-D3QAd`)X^c#Jv-^*=PZ>QJ>&+S zIp4QmdW5;vy{u$q;KE7k7e*(;kqb@%Hjhx)kkHcRqczQ5g}+ym%M z=Xl9+dvP4>7$SvRF&K(Ll@;u}|03dz!3OAH5syD7+}oQf)v}->smwHm~ z#ZD2q*KH7bhxP9$51>8->C2#-&huvWgS}TtFKsFulqLT6oP%IUPodm z&>)1<+Z0k8lP=sl3aUXr+2nylOQ5mS`vQH(DMIF>p91fsU!tUpHt(8Z{C4r)$z1IM z0OP1bdwP7`FAbDJEENVj#{@_o+m$dhRD0t>vB4vw6@aGz2RUJQvKUAyCQuGCBk?)` zf&-{uG#Q1oP`)8Pbt0R8EGcT$Rq{R z$$85pO7PY*EwcNUzQIc@F`}*Za7C=id6*rkR3FhrmYZhW=B78eDrk6v(4Th;Yk6)P zSKHUvF_h9TUf4qKLYIp{Y0HJTcONY-HNjmCXr4D2NE#K$-ezTH(8G0oinPl0Yc+|qv&a2E+ z+1oZMLQh{9byZI9-3sHGrbSpByXbIRQ6kw2ZA-J(OJFD#)>SA;($;0SnznOcZ=Q%< z40v^sKjlBad=!4l>Y&>$7dPo?`{M2|jQ~F3qFFrYCE(r{T-0!}`$Aoz690C&J{V(^ zj#wygXPYb-YIzoFLJ`Pw_)L!oEG&{e?^pTVD5{2LWQVZ7Jr^i)Obc>xB62osN~aLK5VQ;3?B5#-*T3xH^7hnW6^8&hflNuLpY+S7-Yq?I=_u6|KHnX zwGm}P9*BLIOW=Xz+!&N#jO|-zXkF|gv1`%~tU4)Ou8m=}-AQLUon>v=N!JQEpp=&S z$jqMx+y_K&U`y~l51Wv6F;0_xajblX7{h!6h#etvO?3`J&YT?V6xjQ^@e^Ysy^-1U zDSXL86NAmKr*hWQtQ^9qZxgReEkX+Ct1NvKKE4X80kwb4 zeykZ8@)L{Hywo+iv!G@`6?n9{~ zbqmhLibB=#@d^a!DuG~kPy6tORu?_ z-h)`hP{jQv<{`gX&4>1)7dnf~YUC6oiC2hWXl*s^4Uq|_Nm^bnf#h{dn z6)6vpOqPwER4~5K$l>2DIrGK*r=-)jDf@lRnIA@SEE8>?xpJt>Dlax0>)$E5=48=X zCTZqE5s_b>2`3^oe?MB~_xjhTQU$G~tPZ!7rGX8smJ&OJM_IU3z46-u$J1xs{sZ*{ zZ3Y>s+Q}&%@V1+*6K|6r z=f7_zVvAJfQ0PU+umoeCkXrc{&yQ&040- zGCx(OJyeaZyO9vpu7a_sk_26R_7mL1h5yA7FLfWmO?UM~-D&25;AYUtOOzN+q%wPc zw8AH<*UIEWQvbeW!Bdu~;{LFnt*~xSe{VQa|Me69?Zbx(?6sVVrD6IV>LK67q*VjE z-*obxHeS^%)YGoaEWue$kat*9YQj^PlDw+904HwY!f1LMvpN0!8)B)>uq$oR=pj|% z-l2d957%M`K_oY$PDP?Bv`Kz2D))Hi=yHY4jaYw9118DV5sG#~>mCwz`;B73WO-wI zulDnTU=GJT2fZ8p<%%AxkeJCR9xPo}#)YjePzyFy*zY}gq<1pgovni zw<|$H^p#wZ}T6L=OhI<`S*V9zU6tX}5QYG1HO%TjwGiRi-7f>0PTCTNF@#wJ6WZ00U2IihT&>QLOo}7c4_fd5dFaLO z?J|3v89Bzb_`fg9wH4`TBVu&34IEGd&o%mUmAqpfb%KiQ9r1ITKEYXM#C?tSceW;!w z{8}*dUg-PQD;d7pvEXtM>O4o*=H0tME5=Trt1jrGmF8e;w@6+bdA2g< zL$Rcne)iQ(IL_Glml55+gaTP{JM#;Xy?PX9JKZ6>whC4Vxy*#=kaUXp*0Z@H*9Aa% zuTTFN6W_hNCJPwC4k6G1Y-QS^pvBCMd+XA6AH>SQIFs_E{q=6K^XPz0{EHg|Hd_f)f z#_WpG>{%G2!qc?R0q6Eeb$cNV|lzY6A2kVF3NOa z>Zp|q1CcanF5M&cnFg4%2;34CZq=8bh=b1?>T`W7hQCxiT9tVILfbA^8jxYs{XfOf zhfZ$~uB?QG;;wwhd0IdNwywWZ6#v009eHo6kcAFZUM~?t+pJ96&M6CHq)_g0CZqBO z(-gmE;UOTx+uj;2qrAuJ*=H4HQcC_L8en%+7FDq3?m+j;f60rry@^L2jj;s2o#^RQ z^()admT=yQb|QW!|Mj3H3mMOZoXAII7L6?gdGfCzww7>4knh zLghzP?UsRQ84365?x@aMRt+sN3YFJ&sjtKM9Z9Vx=i-^o{4(ZAGGejk_c*b`>$#OU zBI+w=ZVvLMZHf+O)Gh zAo&5k)I~DI74H6Gb2dpM7J37$tiEyREbei*57#75GTYwKKN~cgn;R2&v=YA;)oMT^ z;|FNM#9sA?UFwc!swptuQp!+9C(PRXZi1YvcgHkk(73~({pkMrQ<-aKcKB}>53Mik^ijMUcHS6iV(VR`^x9gB5Sk`uq#k13nbaASV4vUDbFSE- zclkA}YFb7>{fY{ApSqJksd?+fe9ZjyvuG3?-A!y{>SQg9CX4DhXSt225#CK_yC?Mb zc8Y(Hp8M=O1GCn$yo3wfs_qsc80!XU-bdL;&OEmo`73 z_IvDI#M|}+`KisMzoTeRsiCZb((w&8?_H5g&UX;~zk&1JFRx|jz1K<7!MH1^GNt-V zvf|U3vCZD%K894Dd`Hs&00eZ#H%g8qEy$`NQhfchctmm(JwontQVW=FZ`#RW*Dv-u z<$N6$xTv8JJe?*|?`WL2zo)^4ay2TEroE$6Z#VT5FG{h1e&it<|D`7=3nUH#(;{@) z_!&3OfPeMp+QzQgM}k7kA+=C9X-TA~Cus{v7N@~Yt}HKpLd&b1n|_n4cb_pc9RkGl z>wU*=?6w@`_+?SHW5#$Y6vRO9AbnEkJ4M@~I^#OeoSyGA6%1b+m9_6^??0u+ZQ6(* zB*Ug=x0lS?xx$@P?Sz{uyd-Tc;Ze#caadI(z${2GW92Bo{Ql}y)4pO3kJ|cOXlgrnO4a>XxH`oev7RdC zI$#rBxHFnM`Q}g5zR-v(G(GH=b7ZMz%k=5Z7$2w3MPAA0B0@^87C0XDIeYUyn zzI?)alTb)nCo$O2_!QxUE`4}NeONdaRUFm2_BXjl_;{621V#JjdH1nJ085m!Q|QZ6lUz z>94dHZURiFq*CN;lD*(zF?fiFzfz`fXd=-!IZ02wze&R>3jgpWh4-mkFi8puZc?AS zB+e%@Oalx0*tzu(5|Ho}u>(Q0}gHn`Bxs$k{mi*56lZT38{WXohN z;tM}moaC#NCH8a{>^og%##NokPoq|>d<&f#Ci@n>;r3^;ltIRk2gVr)=dFOY5sUMG z6Ezsl903nljCd)OF1>F;PTi9S{V9RDl7grX?{L`RjQkC=_|VwkDA?bYVkB=F8w`{` zN=m23m%d)5vaz!mcO5i=(QWg`f1&tlOxMypXS7USefDOFsyavQ^a2aMY3Acx=#oq5 z;qix<^?Ow#wT6`$IvI2R#9Jr5DBQ`BRV>V(qx-ZL|LQ*H6j4%A%w^h7HyhzqM`g6l 
zHebVGxzQDk>;m`72IbiKnG7o|N?!UVYF;|r) zJ?nyrV~u{`%B)s_^fRky`sgSzR;n6dOh(Mqqv zPAqjWgaISHVOHO*OAef}efkUx6)S(w*+-QJ93(-1##{N+{snIlYWTMF8E%r%hm}w+ z7O1NjZb9i*@TtR!DKdWttRZB!Xwe~H8I^Lgv@G+AnAD{(P2scIA05i3>{ScI*(PMz zBqq21=6e}TUT%Tka<-BA%jMEi8=X9pN`xjpWy((!7Os`R&znStzTPLbJ8C2!pbZ_e zWwLCl^bPE$6>sQ4!?y-53LuN*_ziY{4zm<%Qtn1Z+2ItG#+jq)eYYGsl3bR~PPI99 zTOO~Uyr*$9a1V-kp9Vw?Aa^Vs%(V`TTUf z&u?T$F^B(0QhBgz=U3y=5=h(vx}BcP@uL@@lgBFkg`@ zcKh3xXzb91g+p1Qc9C*kISnmSup~JP#?BOm=A(_(gQGu#?a5vjLS?dRfD@8xJ&apS z%gPK=fWhH_-i4O!4m@pq$bhXT0~K(i--UPgw>pNlA+bNAnoZkd>aT{ldhmb51Gd-w zPQlCOyG*t}iVu@d`+tshj3fs<<0-aJ*A@BTm&);y$r&W6-PM0N+&}B7PA0N zqbkwUqV%X{n$9O()meH72kln*WL*_y)eVI{R33BhXd2049a@ADz_R@^iE4GJ4jnVk zP(e3Tk}-`M#n6B0bw&4PaS-OUxU}N6@+e6u7zB)`z`QZ@gNyOVLIrZkR;16D#exnruVB&8&Quq-Fk z*Z_=;{z+#KPLFcGC_{3-g=h_ps2(T$5)vA<2@D~#jtlvza=*Z%WVt1?S`N^xFxe z=!EnLgEI3={nZikuIu*RT|j}tkLM#8JNp!b!Q0NQ~ zV1R?=8g=WT+s-cA!jC`p#)ol@c;hK-d2Zd znB5aBgEDW5HjcJj+geF_)Ab(+DUrJn$yaaWVb?ADEdSetFzEAiY#Xh@Bi^PFRNs{{ znuGJ?EUdlUMO_L-N25#uj?(HfnWd9y4?Ni#CW&6YkY(pC>VunCs;x~e5o?y=nBx)L zDo%y-m~x_77H*rj)nbiL^?`);sD42N$e>S|wuLsH3O8a~O*^)D;luP^m|%FEg!9VF zo4`LgGTX3g2J56Ti$7Np-+!TmW(A%^s7hQ~CJEZmTFmq#@EsN1rm~;)fg_5~6_VwX z@RIl;EyL^23suD`hrtbq7#=}nQNwvdME z%cGCmAKbmNnW|ZX`Z1g0ubt1tF&=IPk(WNq^%*-41G{qnGROKBB-Lb1(v)3OAH`A1 zD7;Ug5AV--Z8sWd+RoD7?Em@ciS8mtRm0eX>GZuCrxCfox#R(EFj9Rwwrq_ZhfzQp z4U|VL)50uIwO+_i3^>!}Q>~v5)jo)Kc6ck7OyjMlo}?@+ez=sF(hCjs7ML9yNU0p+ z!WvmK{sN0}ne{NZGCAKm(aed@l^#ai-jpG-&JQeh(tmoh4N&^oz6+-wh2zRnM8ItT zF*HnX8R+ySh`;A+7IBV#X0yL(RkvBW$>kOyeCY%072C7`o+9A*E!ayyV9ZR`_2z32 ze6`)MvCYB>Zuj<;h>a^fy0v;mo*SjWBQs)S(Bf9(Qg%_`|_=(@YIXTU|vq+l0bkfk~4wl0QVLFL!cYIDy z_!v9JHCG9~6mfBEOY?NfqV&~1T=8Y-mVvub#iB)zxOkSej=AT{X*f0dHM3|ItUTTS zX%%=}x+xBSbVLa1F3JxA+R`%11p%?)0c0r|`Nd!NPvAV;mmKdUefYZ1D0b@pQeVWr z9e-m{?@oZpgN;hdQ#~J`2E*doVX3Me$dxJ}k7_U4({#)M_1Ez4rBcu3f~*Zhhqi0_ zOBFuPQy8pBIc$#>H8rh|Oo~<%`6!CD5rXrJSODPU_>w97`Xff}?Kc#3&=|9lbMcsd()#8R0KV}p2AfzZD#*9z5`tzO_ zZ=Xa4?;?%P%WskspNWPVGcnH>l6S;a1NdZk(=+KDNY4`;PQSJ}qQo|xR(^L31P9Jb z(42ebhBf4ZkB$oe8V-8mN89a)D=iF(emf)HWeCwYnCF=&vI>ZK->x<$r#9pZImzCN zRpDTD6jW|*PN%KH;Qyl|4CV6ZqKbobcx{5j&kO>WyWT8h8i-PVQVp1^W2>&TU(hJU z!R~dqXqO9$1@G1T2h!#_=yt1Ed>SkT{&uem&WD!_%KE^{+50!~*;XP8an^GN9PMZv zD}LN0)*%F=hQ6IVr2-W)_~qpe*tU#IEk9GdmnK~Az6~kmyOF#aa)8#T^&dHMS3$Q> zL)T9kwT{t|dmyQ(bI(Z&bmtk{y<26EJ^drT^s%g^(VI`_M@%kB; z+46UPT3ttAK`FC^jQ@dLYX$!U%}3t7_MApCE+pDGir-{!L9nKqOh;dK)oGA(>3v@p zEsA#m{c{~8{?Z3d2{W<&4MShH>$yx}`D4A!5(Us* zx}eAaW$+OGYwu5SXni5Wc_7Oci2|MWkB|GGuF9-ioFm}Z#ry{bME484od1DL*StQl z5B)fI?t8T|)IPAUvp^KGlttp#$hfzR;kA!`?!rp3f1`~^j1ZW{>~+Q*Uw~4O1yl?TD_t7R^I*kJPAccUcWQKg&YI>^nmp z0`xaNwk}+$MLc_?zKwR`chvTaiqepD1LItY!FMkCCy<}03OxWJz?G%?X#{$05z#PP z_8+JPaIW0tU8u-!$`3lM;avwf1BKHA+Fcete*9008j1qD$se6TYYkUan^z$CMLqJ> zhZ#?SFk>YDCafsND!lU7f~PC z683AcZ@IfC7X{|D|FOQMO_%e!Qb}=kjhuDNtg$1Bs58mJH>+ltHs-$15vOlwyDPfL z?xM0VF@nz_Ht~2|Xn_g08HYChI~k6bTy+C*ndxBBTWVJFD7)Apx$rTXowj+Va{ppT z7sNrY<$Q{1HLa@M6-&@7Y)R~y1%E8o;(QWW`lB4mL+)=6Tn4rLnf(tmcl)T88wh#s zgJPQ^vd{WAUc)4&t`6EP1s50F)Nax`Ykl?{lV~&VwJ%YNZiIHiLXud9!@JE*ma0ss zkiYuoHYXeItz!cmEv#tr1Kj;N7`e_`%u`3q``TGh$;1Q{t9_AvRV%By4#q-VNj~10 z*%Rl?a&ixR!tbZfjN+$1Xsd-gSF;C_D{dRKQrMm5wREC6@yymTmGLGNSS)(^E&2GNR0KhPL!5+w9`wXg|vl6(~sU7qS zC|#2~_o;hfZr&avIZcs(WtM)3G8lmA>QILC-aP-J-?@-a9p^yxLK)0K9gHQpk0!QL zmm@2>jk95wLsUEC@eyyec{#rEt5k3dFpcp!3X^lRfDi>BM^nr@K# z6}|V`=Xd`ANZ**FjY0DU!Z}h`o-la?5DjoXI@A0g;tShLKNS2D@E*5s8Wo=7Tkw6o z9G(%&qM?{he7m^~Gs#FZPylJ}|WKrj_HH zuLr^K=fHZtsbJE?ZY;G8VWIm#$C!gRqs#(EWZXNcBerV%BJsb4FaA0BPB{Jw_+ruK z@lAwM%$ItXpJ{N?9*Fm>g_1}}$yQwLPOC8nhtgdi2 
z{Q7{-H(;HhjGS~eub}JSv$unFdmj|(zXK<=zD-KsP?A}$?#N4spL)ofuh?UFQVRgB z^KJ)z4~37*m0TrSRIv3MlqD5<%E`(yO8)@9dR?aUeROxR;%3}8JYG{;h93h}PITuK z<%(9kp*W`h0DkmNO8Px7quKc-&bRRLH+xAxXP*xvu*)l5Ye%3Vv;P1S9soQZah!Vb z1+R*{9}9W9f9&1ihD0E%4NF-C`W%n+^Nx7I?hoVX{sPf|XG`5hth{G8iy+gkQPwH` z-D<!PGbjjf4&3c4(DPjmnefB*Pw^#;66k*tb?pxHs|lghFX6t1SpzEtE|DP- zj1hpPfg>Y>k8R*E3wY1}00XQ40LxFZ{{U9=_~K6kYqOQSj`~9w zVyz%%RS%AGdFL27^*H*PuYt8qDjz2P?77-DVntv8U<(7&jN|e4IQrvI{hz)MY?o0V z6ZHEyAx}0nwvJ~LxFCfnkbs4df=O<91DcZ4;r{@_+btB?L-E1|`!(ChEThvD#u4FI z-)Wr-6XlbZQUFq*0@dlfPcFQ_neY8ppH_Tqd>MCZaPhUh!;zI?5-TXkCIiW@bkt5<3XB3ya4wW>$D!bNj!BJ78fbkHIV0?rvfDp=Orwp>V)K^u=cDmXT6u z_7zZj1t1=B04LLv#eE|G00Vv%_|^=eX&f2~QP-+1u|9TFYk7Cx_#w#d}+M1-88_E3hFXg+h!1Nx)IZb_ATB z)o$DNfcTYfC)%d6@a@c|MrXRc@Wr#t`6TQlsNlCC5sn98UEZ<#A9$}vxhrv_U703e zBJyn}Dnl{D5LY9FIV=I-fzgFx-24;2cb3>_ml5U!njv|n6D*OqE#?qVZ3F-YKtDIF zPu6f#c2@qrVw7<&M%iA+_Neidwm#k9ZwF1BlFgt)8QqPF#gu|b{^-ETBY{%4?I);R zyq8v=3A_;gY5-sR7zSaSH_We$j02I##s>{r*59)qh|Iom(eK#|jT+m%q>36vIAtVl zTmm}o+m$^0q*PjGz&{h}PUUSi(K3PoI$N}8%678kuu^&q=YU2ycpDk`~#QW)3h;>&Ww2a0p%PjN}pr@qTr7KiJFSRlcon9+B|$&gBl*wbWyoit)VR+-D$z$jHH= zyefRW{{V|AQ>fj{TetWS@&N{+;vkctX}pOhUnz+_3Z~LH0dhgYpIla^o8b?FI^0&{ z!@e=`1&zuP`|I$n5j<>C00bO@auogGHjTMER)4dX#f>ryH-WESG%k*&>Na2tHVG=j zaASa^XCVDEpKsug7+ULa*pCP_cP}7Cw3gpt4YWvkDgnSFCnubY8g!l=)J`=X{rt{_ z3u3d&@!KCKW|F}SO^}h2k|-rZ4y2OZNanat59@!keU_&-pW+V} zUR}Zvgt$#PkZ&AvC3e~X$2^aejPqQ!t?=H~{>9q!#t1}Y2!7QI!V7$#Gv{#O2>DcJ zb`D2Fl%53DB8d)_d8OP4&~oZ{LQWK`Dfz<#z&Rv=obg&kr5jz}(%+%gIo6Z-QR-J8 z6+Q`Q_AO2J~%DWYMDNG&PoCCPG0E`og^KE0|m4=;vH2xd3a~ntO&;x~=%r)So67-gDgXPn(_VJj77Cwk=Y2_&9)&QETV@56ox z)h#4lJ4G{2Cqd*%a8@wkyDk}+0ggBr1E~axdS-zJpK)__sQ3{soTQBsPC-%6QO?i2 zRAi1vY!QovZ7iby058mzu@7^B)E`^aCYtSEYqKlIBFnTM;|g-w_{OD*uvK;{1_P1usKMAzT#}`ZuBC5g-`)OZGKELK z>U{MMo2{8LZmy?gEDVH_N%@Z$?Z6p7ki|OVO1`&H`n2;1zyoyB5s*$8t_FA~)3L8i z*S;Be#>V);4wVPlW|)|TuB8lxPzA(~wXzS)s!!f5tOiC&Ec`2=JofMXk*WEyAXAx@ z8d-6_a-eqL6P>4=05UMfuBIxg=-;RO56W_8 z{--Q98b^t4rJ6*`K{6|0lLwwkuLn3DhOC_n#`+seWhr~38!(WfD}dSTRCB_fPBVf! 
z@;j|h;hw8&c9${uM)ylg)mcRF#>)WpRRoQNk2`yY+y~3WjVHq1EE;UOgI(JscH_+1 zd48~+%0LYnW>V+o>4Vhzb52$5b#I^fp?y-T-$T#l@s6!$Y7%WCXh!_ut@eUOPeL=E zoG~GNxHRZB4NFrjv$U4~03mqRO~Y{t2_%l6C?!vG&sG)amLCS&#rF=7Yj8_5L>hQn z?Tl(j7>E$8-2nMW$+BJSkY5=k_s{QeTS0pJLxhFkMU$lc( zc2D~6<|l-!eOcr8ZF6*{JE0=5K#Y#;3WJeK;ckR3lf8-GNw~ssrTta^nmS zEK4}`$;jiUQ(e9Hfc!D6U9(?nu-r6`%PXz>7AJOC5^zZ1o(UNoam7a`fb=^Z;u)=H z4oC&<-NJ?+FeHq2?VKM_Gf&;+)Q_vm+oP7Y@m>A2k=&)#?XpHmS>hp7k@G7I9FBPe z6OR0v2adIUZ5^)sL8xD&96WatyC^I=;C6c1RqOFiFi%JqSC$&}$E6 zx#y5Yc{yn9A(=+-qE}IcU^f;!x)F@^IV5{lmWyvKj8o~yL(&S!405)kV!M@&GtS`3 zSp1}datRnXuC{*xSlgI)MI^W-;U0RhU~rf?Y~@KjdlCW3HEPGhUJXm5x}`M?<= zjgnQ{f>;9B02~|>f(hq=!j)<*XzptmCA&Ep^mr`FSlC!v+p5WyFkH$Edr89KFc7vf zag6N-w2iO!q-%U9OTf4b6w|f|#zAt}$nAlV>z-?+n?>+WoarR?+P<3*VDmqdZt@Zr z0EA}&K*>1`f-{m2H7={9{34uMHU9v_{bu4oWD~~@g$!?kM$wjb+(!W9;PxFdzO@KH zg}+5}p-!A)a`D(fZv)AvrGh+wm$!oBa~?wO+QmZwjQ#|GJK8!uwWgG_Lh&MQ1^lap z`AJgSf?0zh?KWK_QFvC);_fB#-SGaxvy*HPmv9GHd=_)Bga|bk7@xaVc=#1wZ-mL;nCw(JWX?rolLp3zwBj z`B7YXYqifLfXA;YFi#&TChnl~9y$PC+= z@sYa(;{!MtuSM5Vm6zswvBk%!VCx#nuGudqkQj)jOUYkY0GUPn6W>5=J|rJ zEb*0~*DCx*t5cWSvB_wfFNw@Ch>=kzW@znYn5xIMakcP1@h2d$`LZ#L3h6vY;a?PZ zSuI;tnXT4gxgh&*n9if+W&i>)zd1SQX~uXocnjmVfV?H9T87 zRXU~l{wTFxF53kMSnv>e+4E z#BUZA#>wY(jAe%0lGsz&U}pq_yRC4eUykbg?H2Gy44B!nJCf*ySE&brah4|-0CWPq zo8kWe?4ROIN5V<*JK}oWL2s$uSlj3t7?8z1?ZwRL8gDyu6gx{kNACy+VMCX%X9HU^ z#?(~kHs9dAUcc7HxPNbpsmo3`^XR^2uYz=aN5@cH_>WJ$ypK%M)+N>TX@f?V&g`g> zZVGcTW>B$(B<{{IRG*_D_z9$Zvvl-=dT zP)Pfnd8Cb68u~6=9_g7n%Yv&t&+zgIWS;{@dcl6r*4%Uz1)}I5_KEcUAF0ZWHvA`JtUQk zvSZ(9L0O&#y=`B|{w(oT!;-O0Z=~A4V30??QV*w?{e7!5!FuG@eh$@qZ*jS%)Gsde z7}THO$ygM7ZAJe8zU#A}_4|?7mCl#JI;vdi8vg){Zd^2XdYOe}2Ls5qU7R0G?=@FR z*ROO>hL?K#IZK3<)s5my$+d4A=sIuk z!7SQ^%JMQ1aD(Zb)Zh(E$Cegn=3PF{OPR2Ljn33PeHS91GQ0bS(CWNXszD^WUY%>H zNo-`emf#5TOp(OAp*T2P$TRD-4z-ngf2Un|=D}u?LH(O-_j?fhgum{=Z&T<2AQC zK}f<3e-~D7(zNV6Q4PD@>Qan-uI=7Jc*>{By*q9Gb)BbbHq)f{bIG};4=Zu&m;9;6 zm9+kUa7k%zkq?7BNp+(=yh8kl^YSA<@Btr5v zghLSwSLcdCNMLy?7!33$7{&OF;ctSlO(aox!$z_*znGVLW|utDBW+Tl_Qb@E<;g%- zgiPSK7rb@hEi?Nf&QGx!T^2a@#7dIb10w`-JvtD+pc>$Aw3)2t3-%szRZrT92L~9- zpvDgv&rm_Z2EIQN!nkTP=2MlvUy0*mDSIdICl}XmmWMkJhks`aKN6d*d*Y<9ASKS8C)Mj@Ylj$*r#8!O?y}Pjcushq#(%^dNO+;s2x)?}U-u)k)IX>hq=mQ}Jv$v*9qpU1yZ zT(vlUH5HXsdG^Dt>7%vrK9%q*M)01iY2j}L=(n;92+hhvcHm1IO==YyKt5&O5s4sf zEB(`4I{5Fx8gx?H_($MIm8aO=j77W6kw9R(9J6GxRU~JCJh0eOa4l&c54=4#ldk#J zQ$o{d>kGBC>$oYqw{wr1YW&I$dvbk_ZKr^3?4S##_?KAT08-DW#uzBz3_392amGD) z;=F7Z57k)wTIao`4clMx=`+I1l@IM~f4gPROaA~-;&R?A_=9_M8rtc94(x4KR!yz; zSCod0mu%Hius?0dPALoD;~Zmlk@ocJb+7*;+f>pukP$Fa`k0QyT(* z+1v-A2b%ABHYs%cZ~C6Bu$Xzf^E1ujw$<-r5zZ~1A2os&omGrwvIzr>kQIXve~1x* zy0u5a-x7Gb1(w3sPIQdWZW62@UMy;1dZi95Jt>_i5bjNl14BNM&K8wK+ZDC z@o{@Uy5?&GMg60QkKoJb_GeM?9i5z0O(QXm+fWT6?O-xSSy7G-cYhEk1XP;ehP(|0 zoOiF`i>M<>bz8O6HOq}|&SNHFZAV6$Hji-w zV=+RiIUHkSAQ4sH;qI>-b6ja!M3a?N zlgM~kBqcJ3B!aA1D`B|Dwgo?_)Y{q@%LiI4z=y=`rCrMovwONYi{>Z`sYb@tY%2n; zcsq`9%J=7}@Ylp24cux^66>0!j6QS81;jU!H*^${|c?iZ=X;m1`K>2raPXn5*FNPF}tnYPr;=42o>_ZHxMk70ZY=he% zjNsNXz|HAz>;4$a>e#5gu6wP=iM%Twj>+MFjr!a%C}8m2YkP94h=35RPD7B)4mWKl zpsc&!il1R?SbR6}()g@XFk5Sh_pDp6I3tu91fBp200XsnPNNmd#QuHUZm7UWE$&3P zX5?h3Dq8?>2wY(F9A;cw-<2*;O19D51V&~8&@d6JVNMQnfu2a? 
z=rK|c4S4cvpY0fAy^#|IrGeSEGZTPL4?insEzlmgtS6V!-88=c0Qg~hTCw{l->LBj z;g+Qgx`mgG)ve`;<(1))BN8U_mM-4M@ zrqWwx2y7A#Kf1uT?%!oe=U!?^-mXgE?9||Q_(E$_7#+F znbi@{6%HE#n2fMo6M)Aoa%#q*{{RRgTnm2^_?tqG)5;eiHb&7Pb|{m9!6AzB0a3V* zx;B&W)}8SG0L7jxfu~Ih`sJD#414X|76XRtGFJztan42o&V{$Z?}=9}F5e8?s8lXm z4Bo$ z6`ct`IZ?U5Va5+&3u2+v{0*e)NVnJXO?Pt*vdd^v?84!(fE1jw5?maCoT&#HuZ?^+ ziiYOsfVqn;19SG%Da6c(**0sOwOXF+zwYX~^CP<92 zs@lxKSnguLhumX1LZkuC);Zmp%B~KlFS_o%QLj9CedK+UZQ!4UxRaHC3%Z4F#+yIE#D;5Fv>_a?aIFhnT&RuMBC#1N#fdpB{3I`O z0I!q$2m49tQ@{2{jNL?%MiM*wX_p|e$Y3N2ILjy-hYU#jy!sZK@khaza7^Fum|e#$ zuJ)Q8O57}%48Z_wx>lYb4Nv?$;dU}oNiT<&5w1v zMwr7Oa))cUUVrzC*1;Z$C zatgCK%Qn?G0ONu;6U7bt5VVRksWr%65jw!=W8a1+?*#;_k-OzN=Narpdd`{Rt#il{ z#|v6ZEMUm-yrtDZbXhyAg~J2D!C+1XYCSK+o-tbia1zPjVIYb*Cp#oX%agg(6(b;p zKXkWT5^Fb`Vx+WsiPfdn2bB-mGvUshZW2v)<^@R^nt1IJNZwFDSkoD1+{`)Jtah;n zj8)6e*_%VRSJgBNi>sBnwqh+B$fuPjJ5wBz*-%Rz%A|}EagRpR^{*0JUs}m=VF`*S zz>G>-B#naYVx@s31PsMSNMLyf6$~2pinSYCYg>aKlNx!?B;}Euv#Rc5oH!d)ki>LT z)BqxQ>}I^O`_V4C=@faAe$L(~ie`%2!tZAk=o&k#i@3H)AuK^LvgL`}o(aKRV<6RS zU-nP&Zl`Y)`k#)h?5Ez+NUrALilaLH)gK|kjAe-fac!k>UZ<+u#bc(tx}@^QF^Pr8 zls-bYZMj5iTwcpsA z?V4+99ue{UYdyM=D_uni4S-Y{Qy1a49kd7QA@^Lstj$ciVFK%2>IE%m}Ow)G)c zD-bs-pdglI$-v1WOj>54;rshvvpv$!J**+fjv!1CfZL8kZbHNWIKqOYa0YUxndS<1 zbHCg}aq5xalK#z}Ia$&vbt^ZXSi&Pj2!!QAzYbq;C5oz?6S0_N5l^@Mn*K3q&^kq= z>9_2W$nZmVbj=x*D-Gxnu0mj8OLALa!*gEtd}C{%Tf{X6wn!cRcUeeRWeThbVoz5D zZc&l}?8c+kJXsycYnb&Fo_Oa$Y__uqrA1)Ha83fL*sQ~E0Wb(St!0~ImD7KM6_Z=o z@LenRM)Ajr=buyYYFOIO9g@7Z%RHr{nQ|R6NmalqLk1sv7#x#ye#jTGT1gj(J|{Kg zyJL-onGJ~~gJbz}9+ZAW(} zZ|lqY>Px2I!p}JI7wqMtTIloXUKh2~rfYJ7R@8@-j_<-LU=GJ%9Fh)Lk}?1-UOnQE zgq|+3)g-sn`~#%4cCRD<0Ha-hsth(EF04u;Fe7VWf#U@8UrdjMz7O#Y?WL{1h%P6$ zy;xz>ukCKx;&=msK*K3~WVcdFU=5|W9)9!TPr#W=OAdwhi-QP=%c|;U%XwLq#`#r* zV;d0h^rMO+ta`N7IM_5sq%g9pW>*b`*x3cWoHs7DGFc7xM4>AOOdoO zL5@j1272gEH^tu=Tgh!ck#BbKC}xi8ID;r7h5!Woz|&>q$G-jn+-d^qtM+u6b3 z*(ILiP7P^q6A2K@gb^!^;n8F}vNo|GF~AH7rJ#PxT3)etacynjD~Gs+t{(;la2Z>e zkf}atD5Z(RF5m+2*j;?YWsiEruAtz%^a}0N|V+ zB-5Jd&VhM$nuVjijP}mOqCixuz6(Z6E;74`S8x=t0G;U8sTj95zDv*7qWkyIC4!oo zv~)f^)HJUVX_w7qb>;c2kh71p`HDu+aHnzS1mJeynu_xGS}~;8H|*0g?puXiwh8V= zH=Yhx+!2h|(cUiq0D@e6Mx5$57W&4cac+)M+TdBKkv{1PMpam`B|@P)RRd&_K)*ly z64S?aMq9f9HTeupxxUP5rvgo+1{;dR9PTOr-h_G`{=0{Yvz^SI2Nd3l$oXXIR<;(2 z_M3U-iYai?rb9fTN!m|b@NtefKD@fi#M(XexRy90RU#}%&dgL5Zc35~BofDu;mF_- zUt9QN{s~?24^UUsd_&dRmT^Dm)kUXb}@85Rn)HZqJ&%O9%DY% zkIGUJ6@dU@xg=zgSdJJI$4YYRejmQLOE_kNd7X30{uZnE$$wQ+9HEO5Qd z4IpS#GPHq_3o+WkK?i9ok^EI_6N0xiTG!2LKkUp!?9t+16#PNr4Kw5SjiG-FLTyiq z^#L8vmKs9B;#L?6IyV3o3^#F%*E6i^9xRsK;ncL7pi(&G@?(fFEAs)2E(QTT&T!Z{ z13zVIf3shP^}iSE{xsA)6{rIytt{yzD4>xf%%)`^6N4dRy8(vr>zed$4gSi%2abs> zvNpA9_g2POpid5{j1^EF2yM+PD>P$!F8~0?C5riZW-|*6qYcd!e`if^{{T+u`JP`3 zWzwGv;8T0hq5lA;XrG<;?ePy(SgbV7cUOioL{DPPB!VO?hhi!Do8?tv4%TdN0O_x$ z{15S7sbqC_)~^^&<$|<~Uw-8ncioecOBTSv0aya2zd&rh3jWWws8xJ3;wY{b?kQyr zV2ZJlvLtfEb`pxrOKosTILX3;T*rw10a#6CJ=pO+yt6_J*+(9f*vct(M%c_;>;rbt z2qY2@%4?=Mh8LTb$$j7F{b+kK%W2cUG?V$C8_V#&#gJJGnRS)9DfQJF@wkfIKk$(N5-w1#7EYJsMm^51mqL?7($&t16n za=f}(t$tWGlK7ssTYe{Q7X?QbJ4Wd(-}ThyJRkcb_;2Cno#C$#>KYEse^%FREi5#< zxkb!&#zx;H@&Yi%IFXrSIaO>sWQE7kzA3tcRq^J5q=>T}tdr_)U3YCUua^%ezUM!c zQq?Y=`{Ev)?G|IxwE6B`isur_b}v1S10P;$4KDUe&l_sj$8Yy1VMyS8cQCs8OSbrlCz)8rE8m_J_oK=~rQ0GQulJ&pBBn{o{_f z=tsSCc0MJBObLN^CB^cksAR_IQE|8);GFaA?Nt5gdcVk(ElsZsUX4%3dcL4$ z4%RmDS)d&wE%P=%?~~11@MHbI#r`1IJizMLQCPxr&_uXwf53>WpN6+h2ZrxFPj!R# z`}>Q`gaZ+S@{hT?}8W1xp6SM+&>$j7|5Pu8xf8-33&r>qP zeXd-_s&BPP=Tfo;-wGY5#Ew4d{uNhExzu!>0Kuw4u->Fn`H?qNk(lim9PzI>L71Q9n2^_MryuvnM zk*-*_-niPQiu6k+X)lJL&@_VD39eXV;Yd@0NdEw7g>jl@j@Gw&o}%#o0IAZZMPu4Y 
zSs+vO`P}{#o#D+C^^8*avRl93a?~2r_<|I-w;P#9+9qx4yCX&1$DY5!G2EUjUeiLi z)Fh0-F5ReQZGdF$0YLT{am{lYb-cPKg8WOX#}czi9j>2l{nN~s&2l*YUHy1J6DIo5`ubB8fG4m!z=~~p6$8E0|ufqQTnK65O+0yD-Bznz^cJnsV zY>zn`LEJ_V4{Yx|`&TpKe*{5$BWn;@Z-(A4J5*H}ZM%YrBN#tE(EETnV1VF}U9=am z6}f^ZmnEYdACRb0-1~y!tLXPD8%)A>K{ejcs(2n!spsD^8j6qFMp1oaqyB%XGK?xi zmP<=&bNYYmrQ?lF7UNHn(tBx9u5EPSWvtKqlA&0FEGh!?<@cb5l9zrJ~VEU+@l zY;F*SxVA-d1b$&G$+ssX0AaZF;qfldPqdDEnJp!N&QjhMwrh8ZW0QqYtPzlqHn7Mg z0PVWJG0n3)=O@R?jGB~w%eL$M*3EZanmoy2@UW@LHE+LT=g5C&pB`vmYtyw~4GVd% zERkdX07^hJx;eHpF)E}go!L7^4?SxqQ1}(&{VrR0buAj^7Mk6qNMycUraxS%Wj!zs zJ;w&VvDUsL=u`N%&evUrXs5H)Wu8s2JdhMHN+U7KKJxBh#A9|C6Tk+xtu1^dsTb3s zk4$3sPOT-#-5ip#{N!zQ`@lX3A2DIH0oTaO_@@;dZY9nM-ca(|nn#P4;IOmJa}HM5 zze~^lVdLbFitRoUYpo$8Lc0X9{{WYW$Opbhm!8MBue};6Y$Q25(eau@ckV4~XpQ6L@JsQv7jlZvv?NrNDSGy~p zB|n00H4t7sE^u;6{J?wjkU0MU55}h-4K>Ik4&Dt8j6PR3Ng@2z&+;8QSJ0ZL?Ee7c zDBcNV)ioLI?&T&uLJtnc>l|v}Gq^IbLGvI0WtmB3!h*}u=|8iD&Cp1+--tdViZzXb zyJ_tJxF}BGk+$SOc9IpFb{R$q4PKmc%p9%nTh%mk{X(+oe@2g&Ej}N$$RBI4ct!|u zAX{V8)tN`HO6dGG@Gs!DvdLxQj}=*2WPq)EcNBnnea;VVOMPqT%|rG~*7W;PdvEcN zQ^b*iq*_52&AR~(6U}3e1_vw-NCit`UH;AfDYJ=gFMMyN+^J?0E&ih_x?y(>;df(l zfYtF&q_yq~H{8p=O`=6bj6L;E}4S*6~e;*SqqvmsADI4&50oZx+) zLO|dg07gJK>0G7n>|OBE&Oj#kyQM=AU9GU{(Xh`1g^~pWjPz{r*Vo){e`K$SHxMk= zI%kESdik+P+U?`{i-JK@<>9ipKPWi_U~^RNzhlpc(rMpneQU#(Kf-*yQtCb4fD>o9 zlY&0*#yx7eqL#1lU*s=m2J}ns{{Vt`^k1>p!PxEOwb4E_Y8NufS%tlvF$jxflBCZ2 zhf;p<>OJc(#NV=Zi*IC;*jZ|lCAkN9(5>ZHJp9{{%W zUidq8HN@U4_*LR2mR1WIX`0%~Q9vrL04NDnCjbTHE=d5_*d7eB*8E9mmh*gY@tm`T z%3A8JrZ|3jVpiRrGnVO!wW|KkJ}mJfw71$Gy?qp%8->%fr`$SZ6}ewd%l!DpRB*}b zb?EK+jZ&#=t$s(!_TRD}!QCfSiai&>KM*Y9e>3fO7Z$=;gmPpMON9}L@QkeyF306& z00KKd3I5Bz4bh!1G#mX^OM5xiDc0)sO|Vih#x-Dk<=_(e44?%ek0;r!zhkXLoN4!Z zg_Md$$Y+8yQb#~Mr|0@~;;!6&&3_j>IS-qsL2GsnK@EI~Db8`j%5Vk%1M~Og#AWz; z`Z|y2sY((~?g{=Mp~T#L7SJv9xoxEQc(<2ka@MJ<>FaM4D6N9BOlEDWpyfh>ssK48 z)h#FXMDX^%EIM?b4lSaYHru+5KwOaSByvNSR9*>e5%)pK+A~`Bf3yDp#|yjVxSPP5 z^nfye_Ii|XqJ<@w4zhfp6Q7iI?U7yYgFk1F7;7rWUbpcbywgd(SC_<7UlL1iVnPMU z&NK4^)aM7D3YjErc+_8y=*_v|C#@gI<0So@WAMGz#NHg!G&>t;92q>j$s>&<07evt z05<1kd@FR$3t?DP(7p@!x+@v9eR4}(O6DoIM2`zbOa)ws+>OfPb{mn9GPvL^dt86B z28AuU*m#G*ujd|Go$xz4OuwWB8?`ygG*S#DPrM^xmHJK z22jjb9I@)keu>mR75J9w<~z+##unGoc?JYGdV7_4i96JYoRA2|BM0OhaeGoz_+#b$FZM$CWhS_{8bp^?kWFk5M}qWwJF87gK-cVJo=IXWeJ z;nydySSF@vAoxi zS}d<5Xt`s(3vC-p9gVp|0yCY&_^vQhN>A_{Wj*=P$#{$6idr;!P4|S2&CK!$qL)sy zGcgY$#zPRms^@bR+y>U%*-#BK&&0Z>w<^h}_+BaFfdOeFLnMR{sEV;+ak!C#^AZ&H z>DC=H!+LO%4LeM?(&3pRHrix%a``cvG3zV|B zW?jo6Dh9<&C?j!JZQNiLLNI<@>v}2KMScjY!b+ZBjTa?jh7#c_U&8*iIK{3JE#G z9i-=wGX9mZ|pJ7K^_@t8M z<92q6*<*$Y7v{H#BxVZaNWcQCfZY7RFl?vwSgu(mf4G+%GS$uf#$KNWc1${*~VL+ml?Q2}@&K2%^8N0AW8%QTFkRTX)5E6L4Sw)mst zdy843xwm${S!HOXomMzaw>t#61Snz(?FS&bk%b$nOBaVyT&mxH>hv{@Toowkc5~OC z1KfCu?$Xo6%*#B?ZDv5XCD@aKi0r5X@`rFx%tLKbxNetG@UMcuvz|=?Qu4ji2qA+{ zc%p$(lp$;~Zz^+?RSFhT8=|dJe-QYN?iJJZ4J%Ezj`?L9;{DbaXbFVM?~JfQ+f`Y% z0ojOx$64`3`X-yHubCsO%8>33yJFf11xqWVU@HCQEx3Z*l@=~Mua#c@UxA;t!O7Xh zmLK7lfo}AxogY=dm7-`^+_VU?OEQhp;L5r45V3uyfwe92NQ|i3(LM;MNViT9h*#+KHA~t@Z|n!R^%}86^XPRa5wx ziB(;nF~Qqcpw~4Ui%6R5S(({wq<3p)k~qxHO00M;3Y;7jz}>xtRbk+L>QlDpU|edR=86thtbo(Qp$%57fG8;0Nsu3yYjrI;+623g#NMJiY2-HbY8 zZBG|jzGo|Z{{Z0s09zLfP8~g>zDCBS;cpG=*S8wv+Ich2e7Cc`^_xJFtHYLJV+)L` z1y=4M8+Pz1bTQzFbq2DF!?w385CNK7W)AZ1c++Z>%P?d>K@6&;U4nywpKIfL36(AN zd#RcwUm;_ViAas4+@)1jcWrVQk~lFoPFR6<@5f#rlJuXlE6E!p$9B!;%0|qptXmG^ z%-f3`5LIv(6P5LaR$Sc9@2mVyh}Xj{S1;Uecy+uRVzKGgx(q}`50`Is7MC1*9G#*- z#Ew}(R$>%j0Nmu)RduR(S4z{rv{pN7i!{EHYeR2r{&G5y%B01TS)E5J#g6r0S7qD`&*AhFuK22F{?QQL5*zm5Dmr(NFb`X7#NZ!TxKXNjY?MYTZjxFjo*#1Ave 
zXkQ_WU>~|maeBV5q}tt0rrv3iU&$4mZm_`@obiQ;SNCB-8@WM@ZaHl5RcoBoJX7K1 z)}_+#w5jc)l%^I$Xk{)`GOr zk*8_DzI2V2IcDD+u`llO1`9Hk7|Cpgzyl21aXdls55kr$zD-8vc!kfC3f{Tg;TJH( z0x;ux2HJM7%yY)t@EC5o*6?xT7M(*Et{2%Ja8^v?2 zk=JW}I)?uI>M_LN$8wT4+2m0=NXom3!8^WPgoaQz1zp+BXk2*DSg^T)BSKyzaFPTO z=G}foWZ?N^A>f_N%FHrL9P-!xBA(6s?K8p)3dM0E+QarXWZxny1VFgS+6hdPfW(1? z!sOa`w&O~^k4$|5FQ&P;k~r-C$kzDWVp0j(HUO>&49AVlkTSJRR&6BP`F~xEC5U&w z`5uFR(xkSM$*iY)dsIl^jo3!eM$&NQcQ7Y&1BC?VAZ1IaLw&5zX*HCd zWxUyOJG_jOB-;w)z5(*zC76=9ZNOmd}4sJ@mBD68WcM~Z^JAhRq?tuWUKvKUj?tW6o zs+N2BvJHL zi+hm>65&zjDYa8@Vx`KcC|5g5yW}p@icxsW!ZPTKG&-%_g|bQ_Qj()ejHSMD6TT1( zILHB1$$&|0R9a>1c6xf;S?KnMdhME9W?0y>lCYLSaL&1xYcXX6ZX*l=*U>yP;ai)@ zmd4Eum6F44EcVfDUpzL|S39uly;aWzO0qUEtDi^N)8@KbTj%|EJ*5{d%?8yxHR2fl z&Gxx&R7V@#v9@i^bB>i+<)zwqaq{B-!0@T1|shcwLxN%$w>ttVO3CxXV#>r2sNx0c@AF!Qct zWCMF_O^drEwp)^}Nu%G}>Jqi+w%pOgu}3y@w6Waa9>jo|^%e8y#*d3yr^Qbj-}sjL z6t@@m%Fxa#n7}XPuRZdy zeii#mqeha9DMdv&EfPy)-%EB!(9)+0FyxNO9i_$M!)A3Un5x`OE2i%8qX_ap?}z8# z+M-zQk|^}qpz$*8>ZJN>GAr;*$Dn2lYx5n!!#=3Tw2b^KQ%VvAveWbLkp<`a1Y# zSqQXtG0U`^n9!Fv>T-4u&Z;h(YpqXimw<@uB#z+~UjgJ?EMdJs+|Bh-R&3=4!v5+) z7~ARKCQd$iWJUaV5AWWpX_pYkWpQ*%d5?1(YCnf!qdEL?>5ppC%}K|%B!5;;wl!~- z*~97+#IEs5kig6VDt5ETNB2$)W8Wvzo2SXL)Dkh~-8+^&)j&n<(;@Nix~g7YKs38~ z5^dgWlUz!NI77e79e>%3XZ+%QPC4wNwXsP{NSb6mUl<{!UpEImFw*1Gic!4go9ih* zlvS7F1bUsskqrh>)lbe|?O15}JR)1ezqO*+TsVVk@ zm(EW??9J&G#_3kh4LT%-dyuPai+>T%VrqYDOqPz;R#Jb2kgCJP>UUG{0D>gLRB-5>~K)#CF1Ld`{K_rKPmu9%gr4qKL@L7}Lc|E{MpX0l^`yF%DP)aUuo;oaW5x^Z z1aaMCTBt5*-}$HbMSs8>c1IE7T_$^f30mpNA2aG#7ftfuVM4;cU%JQJ>rmL;E{oy4 z2TivdcS(19FdfX21Wx||+)w3N{yF~7v9N|q0vm3elFHb|WMwYhN$%L~^saZ}74ur_ zP-%9vw1-dAZEa)3drsjHBxC*O{b|uka)+CgzcjxzM6^%jk3_V)wi+F+%!p!XVlzy{ z{q4+3w_ck9kHWJx3%T^vuv^$XtXUhEQ^6uuG6H|#C4WlA@gA=po2=aUGfX6Xm&>t< zHf z22znVl(FT{VGgplHx@HT9LW&~{%Kw4k%>UbKn4_?vH^{yL0K-fYwb$U zL|r}@4gJ03NU~bUh-GPQt{L{aoyu{wcjpXLV`)QGH7jAE&voLhZ%~zm<3~IzJkrG9 zXWCe<#vxRPC7MOXRIUl!bNGcFF?HU%$NvCLuj{8{<#N14W1D6Rmd(zW{-ea+(se%* z_;*~r(ivpDlI0p{t?wgO-ZrDS-bPfO09Il$?dq;MuZy1&X7G=O8uLW(E%eg1u(sdX z`XmgEb1JEgqKra^cwM9%ugXd>-MCbec-k#P!gjyf_t9QUwjj21!I;%uoU>zo-K06; znQ#bQszEh=)5kJhX}5PtZfzk;qIKi(yXiHyki->qz~i zt^{z|>N>UZTHRa}y|(iU6fC=C6D*7J?ATo3oxWyQYJppNUbEqU8tcslv2)@}YiL$O zVw!Eew5smSjmPZ@1Hz$+W%AB9uGUc7mJJ`kUJvmWmleLTVnl}8M7Nk}O%%5)CKgBw zg8p7#Dy2asQ||!Ptu;Pdx&HveFPC5I&ou2VCdqs|`$FkHFtl5r5%@7R&0MpA_MI+S zC02=AWM^bMLt(&J+EcJ8461s)JX!Gfa%;XI@c7fBxUo~GMJ=i;K+O;+r_Dl=NT@`Z zb_I$YVN^3z(|jM`e+UOTt3Eas=JkFYr02- zWYA@~iqPMQFB%)ik8DRORnM_g$+r@CwO!4h$8J$%; zxQTO?7-J-CyaOH&72!L}xA*3dy)O=Tf{g3gKe-i)Ok^ zU)lGUDD4sRC5|g;WtGvxJRqtmSBRAuUJ9r<05EBMIpYmuRlK>>G(i@o(Ik^v+uM-O z6a?;N0D@t{B)P%btCnWUs~*qotaG=#t>~ZTt~k@8-rD;A0Kq*INciQd$D_5?=Z7_j z%&yJ^wx)Rz%RXVsx|G_X8AjGpySB3hCbjgRh#nHv<8AuENfK8}h^G&@GP?rA?Agdt zMo(kA1QA|Cr~FmA{?yd2ZSH1??;(P8jyR=`HCYKW5IH6=;m#PVF^sRuNo44Ce-&yv zZ-(zRySeX9iq`_xc&zab`3opnf-ckKvtv7VGKOF<6tLEwe~OFNF@J~TKW9lr>eam0 z>UwnkC-C!cwnTYuL`CC@OSD#X9I}N++z%Kz0~}`nM&Z6$L0{hxR0UuQH*7QbPDwa^Kb6T* zr)%^5==6K-LdMcGyVdTcjthrWf?Y`+y84Wa9>047j&a&Ztmd8-)T}(qi(81*t)z!c z+YZ9H46pMC3+$@U zrAl|QY5dP(hD$l&V#W6-INC4{mec zwRxtK`$Jo5+Iru|;mF?cpxT#neW9X?RaI7yFhW)lhi7(X8T+e(LiGB7j{Gk6jV!w2 z!ep0habHI^p0PIKuEWl9%^7mIm*rBtkOtiDv6yANt^Ilv>}vM>jp2I=%H|umWO8uP z+izAJgTYV<`kllR?^3PmLOk^IZCRMUW!6|To(D!i*yAJp2Efb+$r$5LOuEu_Xj4(}(%Q_ADT*-@sbb5!a2pv_NC1K{ zk(>k5I>!@2qO@eb{{Z-6WeLS2j%R2eT`=uC|IBE z8z;9`Y|4_{a8y^;yY>MKI({{T+8fo|Vl05oK#E)XCY$MH5uKDerha}2v6 zwAF14j2M#~K6|kpNGia$IT;JvJPPQvci6{=OODN!^3%dF z$8CFODpHuqs3^CPa}#Er^Sk^_RxnM($2u?S%epc)EFYA7!@^d{7 znIB%Z1urd4jn>f-NFNe8`BVVsDoYmHv8iTdBP4ZK#XOU;pAy|QxV)ZQn{jVwk1R3= 
zEUOy1RY=RRW{Vp`0>JHDubpatv@e6wEkg1e%W1VMJE@~OV78Lt{bVJXP(q5$B!dHV ze|u`SM@pyTpC5cfw}RVJ_e|2dJTl#0=@*v}$s8;e;cg4OyO~QfsU?OSG-Hy);NU0~ zkpNhvKp$P@pKrP_|V-~t;KK=<WUXI=kE5+72W~1ZHX5&bT?tdm*3n&Z? z5COwT&aOiuxPO&E+@o-9#ag1KUea>+SFh`BI}yt_t&P~P<((yh>r>L)+Ql!~?9KKj zmL^{@MtKqyLi<>eoCPa|B-4|`o(i8hNhs?~SapnR>Nf~Y8{ap#YiEVG@)Aa(;$d;_pP98n- zGM7MH{DS~1ZX_@Y&T-Mt@NNx44cb+XqiuC%X?GEh>f3Z{ZbiDII17b4L;S}ItL2Oa zRcfW8{4w!Yjf}n>@a3F#Hm2E{$lgk^0##%x3lJko3OUGap|HW*D_LV{;jqwhonLid zUZ3!M!ZhzI8F2W9`JYj;)nPth5D2ZIxIrXxD+BY!uBFQ(61#aJx?z>7l)A>J;+Q6v zN6~G*#|x=~c59_##!Q9CMKWcCWbIiQZZjvc>Rw2rTk}?X02MRZw z9ic_&d?)b=?@5Omn#nKQ1W9J_6wFBBjv3Wh$gEf>`H5#-4hRzJ{tCMqg|*C*OxH2T z1cX^3Bp^_;%I;N_0Cpvq<7oi4SOU6Aq~Mxw`u??h{l<}}Nh?Utv+%Zq;+;qAI+W3y z<`M6JS{Vz5+|#l-PnJa(AZO+W00K)@cPUuvqy<069*|I zZlMr{QgW>(;7_cCCF(NgL%DD zGa?x|QcHwj#a9Dw;C_?fi;Y)MhvKJo(7VZbj8Qrtx48u5O!1+faF*)Nye4=V!y9m=1q>JHk)G{s3N(R&Q@5it}Ip3DP40Shh4xB#G%V7k+d*S zq>AzQTSD(n5D=Wwg$C9g`Ffn)ejtd7}!A{zaTaOyr+Y39(xUQBP#)&6}21Sc1%jN}S8Fk4a z!WI%g9FvHRCiI)Ps{a6k)Xq3)N>)hb;_%M6y0w{?N0JFH!D|AyhDeM=Gm;U&+!=w7 z8$%7s3uFMTmsB?DA5fZPxSj+FZ*#tXC^}&aBVglzOANA-GqfDnd^8(7yQXgtYPQkZ zTtg#VUqfrTvJs_MNl@iZT#})V;#HN4ZQT@J470p~@M-#VcThoYkvH0>OuI}`MA4F< zrr`TTYQUT)-U!UuyfwCZ{Qm&2FZg1W2}Ri)y`{a4+>))N`fc2*gUpU7aX#`&EF0N> z*0xxwEEEX9&KL^27l(AO5j*{pUA2v2g(TE1)_2RRO0n~~G5}cu;l5ScSYRz%@ZFW7 zT1S7U>B(;IF-c~>)4)kC2_#^g7bY|$Imks|*Z_l8th_yIZF6v7wKo>bE)9&)!p@eD zjmpjxk1R*I3X;26qa%`4g>mI{{{Z1L4(;|K{{W`p-F^Q6@KNFL#VT^)d;^#K_@USS zoKY!hx@MbY2%k*Rp4(1GR=t%sB!cY~RbpWvVaIY15=>wQ4VD5jLuHz_;F|lxUK5QY2rb$;+U(DCBaYq~QHeQL4HSWK zgdui(!xnNkSZu z>g2Rq{G02i^uU=I%$0qgjfdx}eCRH*zz)jIxEb;SU4q*Y-Yl zjO3mRDWhR)qr)nfc{-`N%LNPaoI<4bE_m88ziBOU(@FS!r|F9vu^XL9ZI|s66Cp%H zD+c+SKQ_`!5=kqbmHIv&7(5&)<@i5_>zZ9;@2}r)yG5q{%^WAy=w8-K`q#|)A6B<* zI^!(35UK0vI)5tlZEE`Ee0Sl$06_$Bn_mv-`e}|7Aekmrc^HCE%uLxmy@hc8E7Jb} zwe{GnEz>4*6aXk3v#?+Nm{+1DlHY#Y*BXA{(CNM>lg#s5AV+0ymKg+$4&jY;~M7*|Co0rpDp9tl0U2bie3FpAvL@{3%R{!R+8&diX{Ycs~k-2 z`I%S#`K;OR*2hV|Y=W4zre>ATAB`K12s0rv7=iKD3%hsx3I%Rb9U zV%$L_$m%x?Vl9X8UjqisNxuj^oQ<3<&%BP%iYl?~NX7?0Yfd z9s}~P987Qz9S@c}cdY*a4eAQ_H+r?*!B&S%ySY~@yK}PaT>diFT&^^9-9zz{{{Vtj zmp9MyEqHqNY5Y?zp&|x~D@&WFX>vyIDS*#@UPbrGA6lBxt_7X!scfu2vyHpTddKEU zn8&#hxcXyhX>Cx-0;pfYJPUR)GxlG4v18TZ*5@!E(kX@81s{{Y}B-=Mw~^8C)KTe1HD2-St4 zU<}uHGq4PORwXU?N5}sFUZuHFW20%Y{iHaO!|ifmMi9v(wlX^Wp=&S1H;~%f*y)~T zUe7_c556sjd&TA+I_Kvfom$r(3H(ahC81c*G!`J?j&P|FW1dMQ=BAaKgMZ$a=9l2g zmY4Y>NA1#CK@<<>t6AA1x=j0sZKc_M>hZabKbflfqezb>oKh}AED^SQ?Fp05sia%9 z*8Eh_uF#h{meM@JK#V>JEu7#YpPMw@8uknS02bM-QL035{K6lQsGFqcJ%CY4GIZdj z{IGKQy+Nek&fjsH;_ExDf5czeR?8bpb9)`MVggxNnbkg1#(unJxwy=}B>3lT9lR?z zyzw$NB_u3}lWrjA)nh-Mbp98*NxW%isob6;o=eBT?vcEl05CpaIK^RjBFZ~&7)h>A zGe;eciKtyO?F5Gw%%iVDS-{V~6j{-x()%>+^MCNfw4~RY{ES}%>vQ;< zM%R+0%t$IEd59!?C+S+>4m@2xyW-m|Q6yn8ypsDN0~IQ-E^*M{H4EM$>`r!+1GetHnj1V_Pp~)<=WZY-n6ni3_8Tyc~Edmx`2C< zIT^0g;YY#!2je6X>0T(BQKnhOl1w3x#_u`EiB*T)EAzSk0CalSo@l!J+ju*~`lyXc ze`LXCT(bsKHNySeaK~&!VCRFLpjW+q&wdv1H-xm?Egw?1G2UJgJ;V&BhhQ?Ko<2df zeXwg~7<*a#m)x)VjW^1Se~+Fu_-Xq!>3Tk~<6Ua#HO{YRZ3meukp;B(*7j0N#UnBh zv)@4E;#odb%Z4$jFECii8`<;03TyEWD4~SIaRwqZVQDu8(%V{bvwm&g)cG0; zanAD@D_fqA-}QK%1atV6bj!=8)-CQJjm@-SE!G)r7y_II~9~F zfx`t-#=dqET0O7*e_p4j3UZ#7Gxhek)$gvf9cNvh*^RkJ(}n10q+=jQ0msV7pE+&V z%DSrOiayb=>E&)T-Ad-d-W!8G@So0-Iko^Jh)B-D+=nPw3^JFEM%LP?*=d>-`Yx9% z_)5aX?5u2LjgF&f3}FgF<>L%MEb0P+=%HC%Lt(JPE5%+NZx~$53hUu?Xr4h7tjy3p z)JJTHnT(O_^H(xwXmHF+oQ*$u%Vz$18uDqiul;UdYC7kP?z}@`d7@ul+g+&^$zd!X zyK)Zc0gauSTmlt`MhwfA9Mf(g)OA~Vpp|T$?f6!QQNusZ!ci;681C5UzFE#xqJkU} zC)aeumMJt3186t8fDLh2%qE*@Rb`GxjwNS_Rn&oktu_M?$7vPKjoM$u2}_?e 
z=^@^5s;pQy2Z6e`ye*>XmcAgn@ZFua+3c=7KW}$5HqOzOm9DMBHsv9JAmeB_z#MQZ zX{@aqS=2P~ZmA8u&Y=_u5ZSRJv@D7?;{Y-#KiOV zSBRmr@vnz8oiYgIy?At>s;O?N8iZo48an*uNR`l+WgA(3Q-x4D&j(EYQJDh9#`z;Fgf>4B;YeGGo z6!zST{x_Re)MC0zqi1DeP0yJK#lFT-!-4Y=jlf_jE3|Qf(cHbw#F~}NNi@?yVgT9| zg@mmllEgVU7;U@PKQY4f$=30vx2fNFb5Dxy=YO4V9Fo75q20u2K`aJx2_$pgvn|l+ z`bGVvq|&NEq|0*=Gcz!ec5Nkz$pa^XanDL>FpIhgQl;%}V>`lHwzsNla7W?^Zsfc3 zm+Z*|46&9~04PLV%*z=iLjBT=-!~ZObnz5AjFIbe-2J`cVX!CeRi$X1CTDM$6+-2< z@<~oZZUZMz*0oo)g4yrnF{QQqv8L`3(gt;5=yy9FI_I3=V;=8Nytcc#Sm!5w%#OxD z9Fh`I)w9$92;bd!>JTNZgW{FC)!5}+|5x1<4ag1^} z`9=p##`^XD05TyySf}#*39j@7yzn*JTWjeAcE}_0R|Lzw17MMYqyl#z;ZT4tbBww) zl4>`1GHG^$Wbn1DOr>I$Q6W((g$0{yJc+ad*<*sa$X#YxEbJrEE!>M)bz-ta$9Ca~ z`jgIh9QES69TF7PJ|4{X#hm{DYsfRajuj_hIPH)QIvjIKKK5xum5{g8 z1Yo*m=trJ-Cu^|D8Rt332DPP5Sc=hw?4Qp~$c0CKc6pwy@VCdF3e?7{H|;*h-A_2PZzcIj)Dq5hOnmF72YqqD6AZ z101PQ7Saz~uEwi+U<2#R*8zlAW0sI9@R{5{U)7m!nB8S6z zbh^t6Ssw`rc-W`V^Woa26Y#pt~!s zFr}3cl0YTfiNXH> zO}w5fl7yjj23Oma#s=UzW2QQBSx#KFESZW*5^ZQ?YP#K?sRibu_l|A%sD|P>VrD4h zoMfVtxUd}tFr%E3XveEuU*EQCjg~nv9jYnAD1mTOD?R zf8qG&U1(n1!z2lGih|U$yM=BO(TO0j=j8yd7>a?l3vE|bn&RHdNF{~jx1HW~G01ih zvF>A!EFjw54=OU;EmZNR{7EObxYAGA-v0o@_WuBDX!tT@EQI?V{{S{d4r`dzZmun^ z%n-_UDtVz><^`4*$R|B|o(~5+*0Ohe*4V;H#wmH8kKykYct1qcZFJ3V!uK%449xJd z$mSSW7Fgy|vnJxp5!yDh5*UD2HA_PAXTt4gQn9txA=JX%N?vb1XMFb1$S-}H?h(^k74eEOa68iU*qFi+*(vLR(`f>n428TYLZhhabAG5a}*3S8+^ z+)k{3`I0P$40G2Q1066(=twb34xu+SC$74-*L|L!anma8#xdnyw%71Ayj&+g+8UBd zlO@&TTiaXBbj>RA-55zBm@H5@kTMzgi*(36@@*_$d7cTe8{aLWO9Iz bUI~MB9Q7xjgpx2xH0oLV{zenLtbhO6P<=PJ literal 0 HcmV?d00001 diff --git a/mmdet/__init__.py b/mmdet/__init__.py new file mode 100644 index 0000000..1c4f7e8 --- /dev/null +++ b/mmdet/__init__.py @@ -0,0 +1,3 @@ +from .version import __version__, short_version + +__all__ = ['__version__', 'short_version'] diff --git a/mmdet/apis/__init__.py b/mmdet/apis/__init__.py new file mode 100644 index 0000000..762f5ab --- /dev/null +++ b/mmdet/apis/__init__.py @@ -0,0 +1,8 @@ +from .env import init_dist, get_root_logger, set_random_seed +from .train import train_detector +from .inference import init_detector, inference_detector, show_result + +__all__ = [ + 'init_dist', 'get_root_logger', 'set_random_seed', 'train_detector', + 'init_detector', 'inference_detector', 'show_result' +] diff --git a/mmdet/apis/env.py b/mmdet/apis/env.py new file mode 100644 index 0000000..19b0f86 --- /dev/null +++ b/mmdet/apis/env.py @@ -0,0 +1,69 @@ +import logging +import os +import random +import subprocess + +import numpy as np +import torch +import torch.distributed as dist +import torch.multiprocessing as mp +from mmcv.runner import get_dist_info + + +def init_dist(launcher, backend='nccl', **kwargs): + if mp.get_start_method(allow_none=True) is None: + mp.set_start_method('spawn') + if launcher == 'pytorch': + _init_dist_pytorch(backend, **kwargs) + elif launcher == 'mpi': + _init_dist_mpi(backend, **kwargs) + elif launcher == 'slurm': + _init_dist_slurm(backend, **kwargs) + else: + raise ValueError('Invalid launcher type: {}'.format(launcher)) + + +def _init_dist_pytorch(backend, **kwargs): + # TODO: use local_rank instead of rank % num_gpus + rank = int(os.environ['RANK']) + num_gpus = torch.cuda.device_count() + torch.cuda.set_device(rank % num_gpus) + dist.init_process_group(backend=backend, **kwargs) + + +def _init_dist_mpi(backend, **kwargs): + raise NotImplementedError + + +def _init_dist_slurm(backend, port=29500, **kwargs): + proc_id = int(os.environ['SLURM_PROCID']) + ntasks = int(os.environ['SLURM_NTASKS']) + node_list = 
diff --git a/mmdet/apis/env.py b/mmdet/apis/env.py
new file mode 100644
index 0000000..19b0f86
--- /dev/null
+++ b/mmdet/apis/env.py
@@ -0,0 +1,69 @@
+import logging
+import os
+import random
+import subprocess
+
+import numpy as np
+import torch
+import torch.distributed as dist
+import torch.multiprocessing as mp
+from mmcv.runner import get_dist_info
+
+
+def init_dist(launcher, backend='nccl', **kwargs):
+    if mp.get_start_method(allow_none=True) is None:
+        mp.set_start_method('spawn')
+    if launcher == 'pytorch':
+        _init_dist_pytorch(backend, **kwargs)
+    elif launcher == 'mpi':
+        _init_dist_mpi(backend, **kwargs)
+    elif launcher == 'slurm':
+        _init_dist_slurm(backend, **kwargs)
+    else:
+        raise ValueError('Invalid launcher type: {}'.format(launcher))
+
+
+def _init_dist_pytorch(backend, **kwargs):
+    # TODO: use local_rank instead of rank % num_gpus
+    rank = int(os.environ['RANK'])
+    num_gpus = torch.cuda.device_count()
+    torch.cuda.set_device(rank % num_gpus)
+    dist.init_process_group(backend=backend, **kwargs)
+
+
+def _init_dist_mpi(backend, **kwargs):
+    raise NotImplementedError
+
+
+def _init_dist_slurm(backend, port=29500, **kwargs):
+    proc_id = int(os.environ['SLURM_PROCID'])
+    ntasks = int(os.environ['SLURM_NTASKS'])
+    node_list = os.environ['SLURM_NODELIST']
+    num_gpus = torch.cuda.device_count()
+    torch.cuda.set_device(proc_id % num_gpus)
+    addr = subprocess.getoutput(
+        'scontrol show hostname {} | head -n1'.format(node_list))
+    os.environ['MASTER_PORT'] = str(port)
+    os.environ['MASTER_ADDR'] = addr
+    os.environ['WORLD_SIZE'] = str(ntasks)
+    os.environ['RANK'] = str(proc_id)
+    dist.init_process_group(backend=backend)
+
+
+def set_random_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    torch.cuda.manual_seed_all(seed)
+
+
+def get_root_logger(log_level=logging.INFO):
+    logger = logging.getLogger()
+    if not logger.hasHandlers():
+        logging.basicConfig(
+            format='%(asctime)s - %(levelname)s - %(message)s',
+            level=log_level)
+    rank, _ = get_dist_info()
+    if rank != 0:
+        logger.setLevel('ERROR')
+    return logger
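The three helpers in env.py are the ones a launcher or training script is expected to call before the model is built: seed everything, set up the process group for the chosen launcher, and fetch the rank-aware logger. A minimal sketch of that wiring, where the seed value, the launcher choice, and the assumption that RANK is already set by torch.distributed.launch are all illustrative and not part of this patch:

    import logging
    from mmdet.apis import get_root_logger, init_dist, set_random_seed

    set_random_seed(0)                      # make runs repeatable
    init_dist('pytorch', backend='nccl')    # assumes the launcher exported RANK
    logger = get_root_logger(logging.INFO)  # non-zero ranks only log errors
    logger.info('distributed environment initialised')
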
+ """ + cfg = model.cfg + img_transform = ImageTransform( + size_divisor=cfg.data.test.size_divisor, **cfg.img_norm_cfg) + + device = next(model.parameters()).device # model device + if not isinstance(imgs, list): + return _inference_single(model, imgs, img_transform, device) + else: + return _inference_generator(model, imgs, img_transform, device) + + +def _prepare_data(img, img_transform, cfg, device): + ori_shape = img.shape + img, img_shape, pad_shape, scale_factor = img_transform( + img, + scale=cfg.data.test.img_scale, + keep_ratio=cfg.data.test.get('resize_keep_ratio', True)) + img = to_tensor(img).to(device).unsqueeze(0) + img_meta = [ + dict( + ori_shape=ori_shape, + img_shape=img_shape, + pad_shape=pad_shape, + scale_factor=scale_factor, + flip=False) + ] + return dict(img=[img], img_meta=[img_meta]) + + +def _inference_single(model, img, img_transform, device): + img = mmcv.imread(img) + data = _prepare_data(img, img_transform, model.cfg, device) + with torch.no_grad(): + result = model(return_loss=False, rescale=True, **data) + return result + + +def _inference_generator(model, imgs, img_transform, device): + for img in imgs: + yield _inference_single(model, img, img_transform, device) + + +# TODO: merge this method with the one in BaseDetector +def show_result(img, result, class_names, score_thr=0.3, out_file=None): + """Visualize the detection results on the image. + + Args: + img (str or np.ndarray): Image filename or loaded image. + result (tuple[list] or list): The detection result, can be either + (bbox, segm) or just bbox. + class_names (list[str] or tuple[str]): A list of class names. + score_thr (float): The threshold to visualize the bboxes and masks. + out_file (str, optional): If specified, the visualization result will + be written to the out file instead of shown in a window. 
+ """ + assert isinstance(class_names, (tuple, list)) + img = mmcv.imread(img) + if isinstance(result, tuple): + bbox_result, segm_result = result + else: + bbox_result, segm_result = result, None + bboxes = np.vstack(bbox_result) + # draw segmentation masks + if segm_result is not None: + segms = mmcv.concat_list(segm_result) + inds = np.where(bboxes[:, -1] > score_thr)[0] + for i in inds: + color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8) + mask = maskUtils.decode(segms[i]).astype(np.bool) + img[mask] = img[mask] * 0.5 + color_mask * 0.5 + # draw bounding boxes + labels = [ + np.full(bbox.shape[0], i, dtype=np.int32) + for i, bbox in enumerate(bbox_result) + ] + labels = np.concatenate(labels) + mmcv.imshow_det_bboxes( + img.copy(), + bboxes, + labels, + class_names=class_names, + score_thr=score_thr, + show=out_file is None, + out_file=out_file) diff --git a/mmdet/apis/train.py b/mmdet/apis/train.py new file mode 100644 index 0000000..44ec2cc --- /dev/null +++ b/mmdet/apis/train.py @@ -0,0 +1,197 @@ +from __future__ import division + +import re +from collections import OrderedDict + +import torch +from mmcv.runner import Runner, DistSamplerSeedHook, obj_from_dict +from mmcv.parallel import MMDataParallel, MMDistributedDataParallel + +from mmdet import datasets +from mmdet.core import (DistOptimizerHook, DistEvalmAPHook, + CocoDistEvalRecallHook, CocoDistEvalmAPHook) +from mmdet.datasets import build_dataloader +from mmdet.models import RPN +from .env import get_root_logger + + +def parse_losses(losses): + log_vars = OrderedDict() + for loss_name, loss_value in losses.items(): + if isinstance(loss_value, torch.Tensor): + log_vars[loss_name] = loss_value.mean() + elif isinstance(loss_value, list): + log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value) + else: + raise TypeError( + '{} is not a tensor or list of tensors'.format(loss_name)) + + loss = sum(_value for _key, _value in log_vars.items() if 'loss' in _key) + + log_vars['loss'] = loss + for name in log_vars: + log_vars[name] = log_vars[name].item() + + return loss, log_vars + + +def batch_processor(model, data, train_mode): + losses = model(**data) + loss, log_vars = parse_losses(losses) + + outputs = dict( + loss=loss, log_vars=log_vars, num_samples=len(data['img'].data)) + + return outputs + + +def train_detector(model, + dataset, + cfg, + distributed=False, + validate=False, + logger=None): + if logger is None: + logger = get_root_logger(cfg.log_level) + + # start training + if distributed: + _dist_train(model, dataset, cfg, validate=validate) + else: + _non_dist_train(model, dataset, cfg, validate=validate) + + +def build_optimizer(model, optimizer_cfg): + """Build optimizer from configs. + + Args: + model (:obj:`nn.Module`): The model with parameters to be optimized. + optimizer_cfg (dict): The config dict of the optimizer. + Positional fields are: + - type: class name of the optimizer. + - lr: base learning rate. + Optional fields are: + - any arguments of the corresponding optimizer type, e.g., + weight_decay, momentum, etc. + - paramwise_options: a dict with 3 accepted fileds + (bias_lr_mult, bias_decay_mult, norm_decay_mult). + `bias_lr_mult` and `bias_decay_mult` will be multiplied to + the lr and weight decay respectively for all bias parameters + (except for the normalization layers), and + `norm_decay_mult` will be multiplied to the weight decay + for all weight and bias parameters of normalization layers. + + Returns: + torch.optim.Optimizer: The initialized optimizer. 
+ """ + if hasattr(model, 'module'): + model = model.module + + optimizer_cfg = optimizer_cfg.copy() + paramwise_options = optimizer_cfg.pop('paramwise_options', None) + # if no paramwise option is specified, just use the global setting + if paramwise_options is None: + return obj_from_dict( + optimizer_cfg, torch.optim, dict(params=model.parameters())) + else: + assert isinstance(paramwise_options, dict) + # get base lr and weight decay + base_lr = optimizer_cfg['lr'] + base_wd = optimizer_cfg.get('weight_decay', None) + # weight_decay must be explicitly specified if mult is specified + if ('bias_decay_mult' in paramwise_options + or 'norm_decay_mult' in paramwise_options): + assert base_wd is not None + # get param-wise options + bias_lr_mult = paramwise_options.get('bias_lr_mult', 1.) + bias_decay_mult = paramwise_options.get('bias_decay_mult', 1.) + norm_decay_mult = paramwise_options.get('norm_decay_mult', 1.) + # set param-wise lr and weight decay + params = [] + for name, param in model.named_parameters(): + if not param.requires_grad: + continue + + param_group = {'params': [param]} + # for norm layers, overwrite the weight decay of weight and bias + # TODO: obtain the norm layer prefixes dynamically + if re.search(r'(bn|gn)(\d+)?.(weight|bias)', name): + if base_wd is not None: + param_group['weight_decay'] = base_wd * norm_decay_mult + # for other layers, overwrite both lr and weight decay of bias + elif name.endswith('.bias'): + param_group['lr'] = base_lr * bias_lr_mult + if base_wd is not None: + param_group['weight_decay'] = base_wd * bias_decay_mult + # otherwise use the global settings + + params.append(param_group) + + optimizer_cls = getattr(torch.optim, optimizer_cfg.pop('type')) + return optimizer_cls(params, **optimizer_cfg) + + +def _dist_train(model, dataset, cfg, validate=False): + # prepare data loaders + data_loaders = [ + build_dataloader( + dataset, + cfg.data.imgs_per_gpu, + cfg.data.workers_per_gpu, + dist=True) + ] + # put model on gpus + model = MMDistributedDataParallel(model.cuda()) + # build runner + optimizer = build_optimizer(model, cfg.optimizer) + runner = Runner(model, batch_processor, optimizer, cfg.work_dir, + cfg.log_level) + # register hooks + optimizer_config = DistOptimizerHook(**cfg.optimizer_config) + runner.register_training_hooks(cfg.lr_config, optimizer_config, + cfg.checkpoint_config, cfg.log_config) + runner.register_hook(DistSamplerSeedHook()) + # register eval hooks + if validate: + val_dataset_cfg = cfg.data.val + if isinstance(model.module, RPN): + # TODO: implement recall hooks for other datasets + runner.register_hook(CocoDistEvalRecallHook(val_dataset_cfg)) + else: + dataset_type = getattr(datasets, val_dataset_cfg.type) + if issubclass(dataset_type, datasets.CocoDataset): + runner.register_hook(CocoDistEvalmAPHook(val_dataset_cfg)) + else: + runner.register_hook(DistEvalmAPHook(val_dataset_cfg)) + + if cfg.resume_from: + runner.resume(cfg.resume_from) + elif cfg.load_from: + runner.load_checkpoint(cfg.load_from) + runner.run(data_loaders, cfg.workflow, cfg.total_epochs) + + +def _non_dist_train(model, dataset, cfg, validate=False): + # prepare data loaders + data_loaders = [ + build_dataloader( + dataset, + cfg.data.imgs_per_gpu, + cfg.data.workers_per_gpu, + cfg.gpus, + dist=False) + ] + # put model on gpus + model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda() + # build runner + optimizer = build_optimizer(model, cfg.optimizer) + runner = Runner(model, batch_processor, optimizer, cfg.work_dir, + cfg.log_level) + 
runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config, + cfg.checkpoint_config, cfg.log_config) + + if cfg.resume_from: + runner.resume(cfg.resume_from) + elif cfg.load_from: + runner.load_checkpoint(cfg.load_from) + runner.run(data_loaders, cfg.workflow, cfg.total_epochs) diff --git a/mmdet/core/__init__.py b/mmdet/core/__init__.py new file mode 100644 index 0000000..645d5be --- /dev/null +++ b/mmdet/core/__init__.py @@ -0,0 +1,7 @@ +from .anchor import * # noqa: F401, F403 +from .bbox import * # noqa: F401, F403 +from .mask import * # noqa: F401, F403 +from .loss import * # noqa: F401, F403 +from .evaluation import * # noqa: F401, F403 +from .post_processing import * # noqa: F401, F403 +from .utils import * # noqa: F401, F403 diff --git a/mmdet/core/anchor/__init__.py b/mmdet/core/anchor/__init__.py new file mode 100644 index 0000000..0ff430a --- /dev/null +++ b/mmdet/core/anchor/__init__.py @@ -0,0 +1,4 @@ +from .anchor_generator import AnchorGenerator +from .anchor_target import anchor_target + +__all__ = ['AnchorGenerator', 'anchor_target'] diff --git a/mmdet/core/anchor/anchor_generator.py b/mmdet/core/anchor/anchor_generator.py new file mode 100644 index 0000000..8995ea6 --- /dev/null +++ b/mmdet/core/anchor/anchor_generator.py @@ -0,0 +1,84 @@ +import torch + + +class AnchorGenerator(object): + + def __init__(self, base_size, scales, ratios, scale_major=True, ctr=None): + self.base_size = base_size + self.scales = torch.Tensor(scales) + self.ratios = torch.Tensor(ratios) + self.scale_major = scale_major + self.ctr = ctr + self.base_anchors = self.gen_base_anchors() + + @property + def num_base_anchors(self): + return self.base_anchors.size(0) + + def gen_base_anchors(self): + w = self.base_size + h = self.base_size + if self.ctr is None: + x_ctr = 0.5 * (w - 1) + y_ctr = 0.5 * (h - 1) + else: + x_ctr, y_ctr = self.ctr + + h_ratios = torch.sqrt(self.ratios) + w_ratios = 1 / h_ratios + if self.scale_major: + ws = (w * w_ratios[:, None] * self.scales[None, :]).view(-1) + hs = (h * h_ratios[:, None] * self.scales[None, :]).view(-1) + else: + ws = (w * self.scales[:, None] * w_ratios[None, :]).view(-1) + hs = (h * self.scales[:, None] * h_ratios[None, :]).view(-1) + + base_anchors = torch.stack( + [ + x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), + x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1) + ], + dim=-1).round() + + return base_anchors + + def _meshgrid(self, x, y, row_major=True): + xx = x.repeat(len(y)) + yy = y.view(-1, 1).repeat(1, len(x)).view(-1) + if row_major: + return xx, yy + else: + return yy, xx + + def grid_anchors(self, featmap_size, stride=16, device='cuda'): + base_anchors = self.base_anchors.to(device) + + feat_h, feat_w = featmap_size + shift_x = torch.arange(0, feat_w, device=device) * stride + shift_y = torch.arange(0, feat_h, device=device) * stride + shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) + shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1) + shifts = shifts.type_as(base_anchors) + # first feat_w elements correspond to the first row of shifts + # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get + # shifted anchors (K, A, 4), reshape to (K*A, 4) + + all_anchors = base_anchors[None, :, :] + shifts[:, None, :] + all_anchors = all_anchors.view(-1, 4) + # first A rows correspond to A anchors of (0, 0) in feature map, + # then (0, 1), (0, 2), ... 
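        # Illustrative note (not part of the patch): with, say, 3 base anchors
        # and a 2x2 feature map, base_anchors[None, :, :] has shape (1, 3, 4)
        # and shifts[:, None, :] has shape (4, 1, 4); broadcasting them gives
        # (4, 3, 4), and view(-1, 4) above flattens this to (12, 4) anchors in
        # the order described by the comments above.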
+ return all_anchors + + def valid_flags(self, featmap_size, valid_size, device='cuda'): + feat_h, feat_w = featmap_size + valid_h, valid_w = valid_size + assert valid_h <= feat_h and valid_w <= feat_w + valid_x = torch.zeros(feat_w, dtype=torch.uint8, device=device) + valid_y = torch.zeros(feat_h, dtype=torch.uint8, device=device) + valid_x[:valid_w] = 1 + valid_y[:valid_h] = 1 + valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) + valid = valid_xx & valid_yy + valid = valid[:, None].expand( + valid.size(0), self.num_base_anchors).contiguous().view(-1) + return valid diff --git a/mmdet/core/anchor/anchor_target.py b/mmdet/core/anchor/anchor_target.py new file mode 100644 index 0000000..60c902e --- /dev/null +++ b/mmdet/core/anchor/anchor_target.py @@ -0,0 +1,186 @@ +import torch + +from ..bbox import assign_and_sample, build_assigner, PseudoSampler, bbox2delta +from ..utils import multi_apply + + +def anchor_target(anchor_list, + valid_flag_list, + gt_bboxes_list, + img_metas, + target_means, + target_stds, + cfg, + gt_bboxes_ignore_list=None, + gt_labels_list=None, + label_channels=1, + sampling=True, + unmap_outputs=True): + """Compute regression and classification targets for anchors. + + Args: + anchor_list (list[list]): Multi level anchors of each image. + valid_flag_list (list[list]): Multi level valid flags of each image. + gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. + img_metas (list[dict]): Meta info of each image. + target_means (Iterable): Mean value of regression targets. + target_stds (Iterable): Std value of regression targets. + cfg (dict): RPN train configs. + + Returns: + tuple + """ + num_imgs = len(img_metas) + assert len(anchor_list) == len(valid_flag_list) == num_imgs + + # anchor number of multi levels + num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] + # concat all level anchors and flags to a single tensor + for i in range(num_imgs): + assert len(anchor_list[i]) == len(valid_flag_list[i]) + anchor_list[i] = torch.cat(anchor_list[i]) + valid_flag_list[i] = torch.cat(valid_flag_list[i]) + + # compute targets for each image + if gt_bboxes_ignore_list is None: + gt_bboxes_ignore_list = [None for _ in range(num_imgs)] + if gt_labels_list is None: + gt_labels_list = [None for _ in range(num_imgs)] + (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, + pos_inds_list, neg_inds_list) = multi_apply( + anchor_target_single, + anchor_list, + valid_flag_list, + gt_bboxes_list, + gt_bboxes_ignore_list, + gt_labels_list, + img_metas, + target_means=target_means, + target_stds=target_stds, + cfg=cfg, + label_channels=label_channels, + sampling=sampling, + unmap_outputs=unmap_outputs) + # no valid anchors + if any([labels is None for labels in all_labels]): + return None + # sampled anchors of all images + num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) + num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) + # split targets to a list w.r.t. multiple levels + labels_list = images_to_levels(all_labels, num_level_anchors) + label_weights_list = images_to_levels(all_label_weights, num_level_anchors) + bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors) + bbox_weights_list = images_to_levels(all_bbox_weights, num_level_anchors) + return (labels_list, label_weights_list, bbox_targets_list, + bbox_weights_list, num_total_pos, num_total_neg) + + +def images_to_levels(target, num_level_anchors): + """Convert targets by image to targets by feature level. 
+ + [target_img0, target_img1] -> [target_level0, target_level1, ...] + """ + target = torch.stack(target, 0) + level_targets = [] + start = 0 + for n in num_level_anchors: + end = start + n + level_targets.append(target[:, start:end].squeeze(0)) + start = end + return level_targets + + +def anchor_target_single(flat_anchors, + valid_flags, + gt_bboxes, + gt_bboxes_ignore, + gt_labels, + img_meta, + target_means, + target_stds, + cfg, + label_channels=1, + sampling=True, + unmap_outputs=True): + inside_flags = anchor_inside_flags(flat_anchors, valid_flags, + img_meta['img_shape'][:2], + cfg.allowed_border) + if not inside_flags.any(): + return (None, ) * 6 + # assign gt and sample anchors + anchors = flat_anchors[inside_flags, :] + + if sampling: + assign_result, sampling_result = assign_and_sample( + anchors, gt_bboxes, gt_bboxes_ignore, None, cfg) + else: + bbox_assigner = build_assigner(cfg.assigner) + assign_result = bbox_assigner.assign(anchors, gt_bboxes, + gt_bboxes_ignore, gt_labels) + bbox_sampler = PseudoSampler() + sampling_result = bbox_sampler.sample(assign_result, anchors, + gt_bboxes) + + num_valid_anchors = anchors.shape[0] + bbox_targets = torch.zeros_like(anchors) + bbox_weights = torch.zeros_like(anchors) + labels = anchors.new_zeros(num_valid_anchors, dtype=torch.long) + label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) + + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + if len(pos_inds) > 0: + pos_bbox_targets = bbox2delta(sampling_result.pos_bboxes, + sampling_result.pos_gt_bboxes, + target_means, target_stds) + bbox_targets[pos_inds, :] = pos_bbox_targets + bbox_weights[pos_inds, :] = 1.0 + if gt_labels is None: + labels[pos_inds] = 1 + else: + labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] + if cfg.pos_weight <= 0: + label_weights[pos_inds] = 1.0 + else: + label_weights[pos_inds] = cfg.pos_weight + if len(neg_inds) > 0: + label_weights[neg_inds] = 1.0 + + # map up to original set of anchors + if unmap_outputs: + num_total_anchors = flat_anchors.size(0) + labels = unmap(labels, num_total_anchors, inside_flags) + label_weights = unmap(label_weights, num_total_anchors, inside_flags) + bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) + bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) + + return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, + neg_inds) + + +def anchor_inside_flags(flat_anchors, valid_flags, img_shape, + allowed_border=0): + img_h, img_w = img_shape[:2] + if allowed_border >= 0: + inside_flags = valid_flags & \ + (flat_anchors[:, 0] >= -allowed_border) & \ + (flat_anchors[:, 1] >= -allowed_border) & \ + (flat_anchors[:, 2] < img_w + allowed_border) & \ + (flat_anchors[:, 3] < img_h + allowed_border) + else: + inside_flags = valid_flags + return inside_flags + + +def unmap(data, count, inds, fill=0): + """ Unmap a subset of item (data) back to the original set of items (of + size count) """ + if data.dim() == 1: + ret = data.new_full((count, ), fill) + ret[inds] = data + else: + new_size = (count, ) + data.size()[1:] + ret = data.new_full(new_size, fill) + ret[inds, :] = data + return ret diff --git a/mmdet/core/bbox/__init__.py b/mmdet/core/bbox/__init__.py new file mode 100644 index 0000000..bcf6efd --- /dev/null +++ b/mmdet/core/bbox/__init__.py @@ -0,0 +1,20 @@ +from .geometry import bbox_overlaps +from .assigners import BaseAssigner, MaxIoUAssigner, AssignResult +from .samplers import (BaseSampler, PseudoSampler, RandomSampler, + 
InstanceBalancedPosSampler, IoUBalancedNegSampler, + CombinedSampler, SamplingResult) +from .assign_sampling import build_assigner, build_sampler, assign_and_sample +from .transforms import (bbox2delta, delta2bbox, bbox_flip, bbox_mapping, + bbox_mapping_back, bbox2roi, roi2bbox, bbox2result, + distance2bbox) +from .bbox_target import bbox_target + +__all__ = [ + 'bbox_overlaps', 'BaseAssigner', 'MaxIoUAssigner', 'AssignResult', + 'BaseSampler', 'PseudoSampler', 'RandomSampler', + 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler', + 'SamplingResult', 'build_assigner', 'build_sampler', 'assign_and_sample', + 'bbox2delta', 'delta2bbox', 'bbox_flip', 'bbox_mapping', + 'bbox_mapping_back', 'bbox2roi', 'roi2bbox', 'bbox2result', + 'distance2bbox', 'bbox_target' +] diff --git a/mmdet/core/bbox/assign_sampling.py b/mmdet/core/bbox/assign_sampling.py new file mode 100644 index 0000000..4267174 --- /dev/null +++ b/mmdet/core/bbox/assign_sampling.py @@ -0,0 +1,33 @@ +import mmcv + +from . import assigners, samplers + + +def build_assigner(cfg, **kwargs): + if isinstance(cfg, assigners.BaseAssigner): + return cfg + elif isinstance(cfg, dict): + return mmcv.runner.obj_from_dict(cfg, assigners, default_args=kwargs) + else: + raise TypeError('Invalid type {} for building a sampler'.format( + type(cfg))) + + +def build_sampler(cfg, **kwargs): + if isinstance(cfg, samplers.BaseSampler): + return cfg + elif isinstance(cfg, dict): + return mmcv.runner.obj_from_dict(cfg, samplers, default_args=kwargs) + else: + raise TypeError('Invalid type {} for building a sampler'.format( + type(cfg))) + + +def assign_and_sample(bboxes, gt_bboxes, gt_bboxes_ignore, gt_labels, cfg): + bbox_assigner = build_assigner(cfg.assigner) + bbox_sampler = build_sampler(cfg.sampler) + assign_result = bbox_assigner.assign(bboxes, gt_bboxes, gt_bboxes_ignore, + gt_labels) + sampling_result = bbox_sampler.sample(assign_result, bboxes, gt_bboxes, + gt_labels) + return assign_result, sampling_result diff --git a/mmdet/core/bbox/assigners/__init__.py b/mmdet/core/bbox/assigners/__init__.py new file mode 100644 index 0000000..40a89e9 --- /dev/null +++ b/mmdet/core/bbox/assigners/__init__.py @@ -0,0 +1,5 @@ +from .base_assigner import BaseAssigner +from .max_iou_assigner import MaxIoUAssigner +from .assign_result import AssignResult + +__all__ = ['BaseAssigner', 'MaxIoUAssigner', 'AssignResult'] diff --git a/mmdet/core/bbox/assigners/assign_result.py b/mmdet/core/bbox/assigners/assign_result.py new file mode 100644 index 0000000..33c761d --- /dev/null +++ b/mmdet/core/bbox/assigners/assign_result.py @@ -0,0 +1,19 @@ +import torch + + +class AssignResult(object): + + def __init__(self, num_gts, gt_inds, max_overlaps, labels=None): + self.num_gts = num_gts + self.gt_inds = gt_inds + self.max_overlaps = max_overlaps + self.labels = labels + + def add_gt_(self, gt_labels): + self_inds = torch.arange( + 1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device) + self.gt_inds = torch.cat([self_inds, self.gt_inds]) + self.max_overlaps = torch.cat( + [self.max_overlaps.new_ones(self.num_gts), self.max_overlaps]) + if self.labels is not None: + self.labels = torch.cat([gt_labels, self.labels]) diff --git a/mmdet/core/bbox/assigners/base_assigner.py b/mmdet/core/bbox/assigners/base_assigner.py new file mode 100644 index 0000000..7bd02dc --- /dev/null +++ b/mmdet/core/bbox/assigners/base_assigner.py @@ -0,0 +1,8 @@ +from abc import ABCMeta, abstractmethod + + +class BaseAssigner(metaclass=ABCMeta): + + @abstractmethod + 
def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): + pass diff --git a/mmdet/core/bbox/assigners/max_iou_assigner.py b/mmdet/core/bbox/assigners/max_iou_assigner.py new file mode 100644 index 0000000..57a1e75 --- /dev/null +++ b/mmdet/core/bbox/assigners/max_iou_assigner.py @@ -0,0 +1,152 @@ +import torch + +from .base_assigner import BaseAssigner +from .assign_result import AssignResult +from ..geometry import bbox_overlaps + + +class MaxIoUAssigner(BaseAssigner): + """Assign a corresponding gt bbox or background to each bbox. + + Each proposals will be assigned with `-1`, `0`, or a positive integer + indicating the ground truth index. + + - -1: don't care + - 0: negative sample, no assigned gt + - positive integer: positive sample, index (1-based) of assigned gt + + Args: + pos_iou_thr (float): IoU threshold for positive bboxes. + neg_iou_thr (float or tuple): IoU threshold for negative bboxes. + min_pos_iou (float): Minimum iou for a bbox to be considered as a + positive bbox. Positive samples can have smaller IoU than + pos_iou_thr due to the 4th step (assign max IoU sample to each gt). + gt_max_assign_all (bool): Whether to assign all bboxes with the same + highest overlap with some gt to that gt. + ignore_iof_thr (float): IoF threshold for ignoring bboxes (if + `gt_bboxes_ignore` is specified). Negative values mean not + ignoring any bboxes. + ignore_wrt_candidates (bool): Whether to compute the iof between + `bboxes` and `gt_bboxes_ignore`, or the contrary. + """ + + def __init__(self, + pos_iou_thr, + neg_iou_thr, + min_pos_iou=.0, + gt_max_assign_all=True, + ignore_iof_thr=-1, + ignore_wrt_candidates=True): + self.pos_iou_thr = pos_iou_thr + self.neg_iou_thr = neg_iou_thr + self.min_pos_iou = min_pos_iou + self.gt_max_assign_all = gt_max_assign_all + self.ignore_iof_thr = ignore_iof_thr + self.ignore_wrt_candidates = ignore_wrt_candidates + + def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): + """Assign gt to bboxes. + + This method assign a gt bbox to every bbox (proposal/anchor), each bbox + will be assigned with -1, 0, or a positive number. -1 means don't care, + 0 means negative sample, positive number is the index (1-based) of + assigned gt. + The assignment is done in following steps, the order matters. + + 1. assign every bbox to -1 + 2. assign proposals whose iou with all gts < neg_iou_thr to 0 + 3. for each bbox, if the iou with its nearest gt >= pos_iou_thr, + assign it to that bbox + 4. for each gt bbox, assign its nearest proposals (may be more than + one) to itself + + Args: + bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4). + gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). + gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are + labelled as `ignored`, e.g., crowd boxes in COCO. + gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). + + Returns: + :obj:`AssignResult`: The assign result. 
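
        Example (illustrative sketch, not part of the original patch; the
        thresholds and boxes are made up):
            >>> self = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.3)
            >>> bboxes = torch.Tensor([[0., 0., 10., 10.], [10., 10., 20., 20.]])
            >>> gt_bboxes = torch.Tensor([[0., 0., 10., 9.]])
            >>> assign_result = self.assign(bboxes, gt_bboxes)
            >>> # assign_result.gt_inds is 1 for the first box (IoU ~ 0.9, above
            >>> # pos_iou_thr) and 0 for the second box (IoU 0, below neg_iou_thr)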
+ """ + if bboxes.shape[0] == 0 or gt_bboxes.shape[0] == 0: + raise ValueError('No gt or bboxes') + bboxes = bboxes[:, :4] + overlaps = bbox_overlaps(gt_bboxes, bboxes) + + if (self.ignore_iof_thr > 0) and (gt_bboxes_ignore is not None) and ( + gt_bboxes_ignore.numel() > 0): + if self.ignore_wrt_candidates: + ignore_overlaps = bbox_overlaps( + bboxes, gt_bboxes_ignore, mode='iof') + ignore_max_overlaps, _ = ignore_overlaps.max(dim=1) + else: + ignore_overlaps = bbox_overlaps( + gt_bboxes_ignore, bboxes, mode='iof') + ignore_max_overlaps, _ = ignore_overlaps.max(dim=0) + overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1 + + assign_result = self.assign_wrt_overlaps(overlaps, gt_labels) + return assign_result + + def assign_wrt_overlaps(self, overlaps, gt_labels=None): + """Assign w.r.t. the overlaps of bboxes with gts. + + Args: + overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes, + shape(k, n). + gt_labels (Tensor, optional): Labels of k gt_bboxes, shape (k, ). + + Returns: + :obj:`AssignResult`: The assign result. + """ + if overlaps.numel() == 0: + raise ValueError('No gt or proposals') + + num_gts, num_bboxes = overlaps.size(0), overlaps.size(1) + + # 1. assign -1 by default + assigned_gt_inds = overlaps.new_full( + (num_bboxes, ), -1, dtype=torch.long) + + # for each anchor, which gt best overlaps with it + # for each anchor, the max iou of all gts + max_overlaps, argmax_overlaps = overlaps.max(dim=0) + # for each gt, which anchor best overlaps with it + # for each gt, the max iou of all proposals + gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1) + + # 2. assign negative: below + if isinstance(self.neg_iou_thr, float): + assigned_gt_inds[(max_overlaps >= 0) + & (max_overlaps < self.neg_iou_thr)] = 0 + elif isinstance(self.neg_iou_thr, tuple): + assert len(self.neg_iou_thr) == 2 + assigned_gt_inds[(max_overlaps >= self.neg_iou_thr[0]) + & (max_overlaps < self.neg_iou_thr[1])] = 0 + + # 3. assign positive: above positive IoU threshold + pos_inds = max_overlaps >= self.pos_iou_thr + assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1 + + # 4. 
assign fg: for each gt, proposals with highest IoU + for i in range(num_gts): + if gt_max_overlaps[i] >= self.min_pos_iou: + if self.gt_max_assign_all: + max_iou_inds = overlaps[i, :] == gt_max_overlaps[i] + assigned_gt_inds[max_iou_inds] = i + 1 + else: + assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1 + + if gt_labels is not None: + assigned_labels = assigned_gt_inds.new_zeros((num_bboxes, )) + pos_inds = torch.nonzero(assigned_gt_inds > 0).squeeze() + if pos_inds.numel() > 0: + assigned_labels[pos_inds] = gt_labels[ + assigned_gt_inds[pos_inds] - 1] + else: + assigned_labels = None + + return AssignResult( + num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels) diff --git a/mmdet/core/bbox/bbox_target.py b/mmdet/core/bbox/bbox_target.py new file mode 100644 index 0000000..aa1fbc6 --- /dev/null +++ b/mmdet/core/bbox/bbox_target.py @@ -0,0 +1,73 @@ +import torch + +from .transforms import bbox2delta +from ..utils import multi_apply + + +def bbox_target(pos_bboxes_list, + neg_bboxes_list, + pos_gt_bboxes_list, + pos_gt_labels_list, + cfg, + reg_classes=1, + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + concat=True): + labels, label_weights, bbox_targets, bbox_weights = multi_apply( + bbox_target_single, + pos_bboxes_list, + neg_bboxes_list, + pos_gt_bboxes_list, + pos_gt_labels_list, + cfg=cfg, + reg_classes=reg_classes, + target_means=target_means, + target_stds=target_stds) + + if concat: + labels = torch.cat(labels, 0) + label_weights = torch.cat(label_weights, 0) + bbox_targets = torch.cat(bbox_targets, 0) + bbox_weights = torch.cat(bbox_weights, 0) + return labels, label_weights, bbox_targets, bbox_weights + + +def bbox_target_single(pos_bboxes, + neg_bboxes, + pos_gt_bboxes, + pos_gt_labels, + cfg, + reg_classes=1, + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]): + num_pos = pos_bboxes.size(0) + num_neg = neg_bboxes.size(0) + num_samples = num_pos + num_neg + labels = pos_bboxes.new_zeros(num_samples, dtype=torch.long) + label_weights = pos_bboxes.new_zeros(num_samples) + bbox_targets = pos_bboxes.new_zeros(num_samples, 4) + bbox_weights = pos_bboxes.new_zeros(num_samples, 4) + if num_pos > 0: + labels[:num_pos] = pos_gt_labels + pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight + label_weights[:num_pos] = pos_weight + pos_bbox_targets = bbox2delta(pos_bboxes, pos_gt_bboxes, target_means, + target_stds) + bbox_targets[:num_pos, :] = pos_bbox_targets + bbox_weights[:num_pos, :] = 1 + if num_neg > 0: + label_weights[-num_neg:] = 1.0 + + return labels, label_weights, bbox_targets, bbox_weights + + +def expand_target(bbox_targets, bbox_weights, labels, num_classes): + bbox_targets_expand = bbox_targets.new_zeros((bbox_targets.size(0), + 4 * num_classes)) + bbox_weights_expand = bbox_weights.new_zeros((bbox_weights.size(0), + 4 * num_classes)) + for i in torch.nonzero(labels > 0).squeeze(-1): + start, end = labels[i] * 4, (labels[i] + 1) * 4 + bbox_targets_expand[i, start:end] = bbox_targets[i, :] + bbox_weights_expand[i, start:end] = bbox_weights[i, :] + return bbox_targets_expand, bbox_weights_expand diff --git a/mmdet/core/bbox/geometry.py b/mmdet/core/bbox/geometry.py new file mode 100644 index 0000000..3bc8dae --- /dev/null +++ b/mmdet/core/bbox/geometry.py @@ -0,0 +1,63 @@ +import torch + + +def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False): + """Calculate overlap between two set of bboxes. 
+ + If ``is_aligned`` is ``False``, then calculate the ious between each bbox + of bboxes1 and bboxes2, otherwise the ious between each aligned pair of + bboxes1 and bboxes2. + + Args: + bboxes1 (Tensor): shape (m, 4) + bboxes2 (Tensor): shape (n, 4), if is_aligned is ``True``, then m and n + must be equal. + mode (str): "iou" (intersection over union) or iof (intersection over + foreground). + + Returns: + ious(Tensor): shape (m, n) if is_aligned == False else shape (m, 1) + """ + + assert mode in ['iou', 'iof'] + + rows = bboxes1.size(0) + cols = bboxes2.size(0) + if is_aligned: + assert rows == cols + + if rows * cols == 0: + return bboxes1.new(rows, 1) if is_aligned else bboxes1.new(rows, cols) + + if is_aligned: + lt = torch.max(bboxes1[:, :2], bboxes2[:, :2]) # [rows, 2] + rb = torch.min(bboxes1[:, 2:], bboxes2[:, 2:]) # [rows, 2] + + wh = (rb - lt + 1).clamp(min=0) # [rows, 2] + overlap = wh[:, 0] * wh[:, 1] + area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * ( + bboxes1[:, 3] - bboxes1[:, 1] + 1) + + if mode == 'iou': + area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * ( + bboxes2[:, 3] - bboxes2[:, 1] + 1) + ious = overlap / (area1 + area2 - overlap) + else: + ious = overlap / area1 + else: + lt = torch.max(bboxes1[:, None, :2], bboxes2[:, :2]) # [rows, cols, 2] + rb = torch.min(bboxes1[:, None, 2:], bboxes2[:, 2:]) # [rows, cols, 2] + + wh = (rb - lt + 1).clamp(min=0) # [rows, cols, 2] + overlap = wh[:, :, 0] * wh[:, :, 1] + area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * ( + bboxes1[:, 3] - bboxes1[:, 1] + 1) + + if mode == 'iou': + area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * ( + bboxes2[:, 3] - bboxes2[:, 1] + 1) + ious = overlap / (area1[:, None] + area2 - overlap) + else: + ious = overlap / (area1[:, None]) + + return ious diff --git a/mmdet/core/bbox/samplers/__init__.py b/mmdet/core/bbox/samplers/__init__.py new file mode 100644 index 0000000..167044f --- /dev/null +++ b/mmdet/core/bbox/samplers/__init__.py @@ -0,0 +1,14 @@ +from .base_sampler import BaseSampler +from .pseudo_sampler import PseudoSampler +from .random_sampler import RandomSampler +from .instance_balanced_pos_sampler import InstanceBalancedPosSampler +from .iou_balanced_neg_sampler import IoUBalancedNegSampler +from .combined_sampler import CombinedSampler +from .ohem_sampler import OHEMSampler +from .sampling_result import SamplingResult + +__all__ = [ + 'BaseSampler', 'PseudoSampler', 'RandomSampler', + 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler', + 'OHEMSampler', 'SamplingResult' +] diff --git a/mmdet/core/bbox/samplers/base_sampler.py b/mmdet/core/bbox/samplers/base_sampler.py new file mode 100644 index 0000000..12df013 --- /dev/null +++ b/mmdet/core/bbox/samplers/base_sampler.py @@ -0,0 +1,78 @@ +from abc import ABCMeta, abstractmethod + +import torch + +from .sampling_result import SamplingResult + + +class BaseSampler(metaclass=ABCMeta): + + def __init__(self, + num, + pos_fraction, + neg_pos_ub=-1, + add_gt_as_proposals=True, + **kwargs): + self.num = num + self.pos_fraction = pos_fraction + self.neg_pos_ub = neg_pos_ub + self.add_gt_as_proposals = add_gt_as_proposals + self.pos_sampler = self + self.neg_sampler = self + + @abstractmethod + def _sample_pos(self, assign_result, num_expected, **kwargs): + pass + + @abstractmethod + def _sample_neg(self, assign_result, num_expected, **kwargs): + pass + + def sample(self, + assign_result, + bboxes, + gt_bboxes, + gt_labels=None, + **kwargs): + """Sample positive and negative bboxes. 
+ + This is a simple implementation of bbox sampling given candidates, + assigning results and ground truth bboxes. + + Args: + assign_result (:obj:`AssignResult`): Bbox assigning results. + bboxes (Tensor): Boxes to be sampled from. + gt_bboxes (Tensor): Ground truth bboxes. + gt_labels (Tensor, optional): Class labels of ground truth bboxes. + + Returns: + :obj:`SamplingResult`: Sampling result. + """ + bboxes = bboxes[:, :4] + + gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8) + if self.add_gt_as_proposals: + bboxes = torch.cat([gt_bboxes, bboxes], dim=0) + assign_result.add_gt_(gt_labels) + gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8) + gt_flags = torch.cat([gt_ones, gt_flags]) + + num_expected_pos = int(self.num * self.pos_fraction) + pos_inds = self.pos_sampler._sample_pos( + assign_result, num_expected_pos, bboxes=bboxes, **kwargs) + # We found that sampled indices have duplicated items occasionally. + # (may be a bug of PyTorch) + pos_inds = pos_inds.unique() + num_sampled_pos = pos_inds.numel() + num_expected_neg = self.num - num_sampled_pos + if self.neg_pos_ub >= 0: + _pos = max(1, num_sampled_pos) + neg_upper_bound = int(self.neg_pos_ub * _pos) + if num_expected_neg > neg_upper_bound: + num_expected_neg = neg_upper_bound + neg_inds = self.neg_sampler._sample_neg( + assign_result, num_expected_neg, bboxes=bboxes, **kwargs) + neg_inds = neg_inds.unique() + + return SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, + assign_result, gt_flags) diff --git a/mmdet/core/bbox/samplers/combined_sampler.py b/mmdet/core/bbox/samplers/combined_sampler.py new file mode 100644 index 0000000..25e820b --- /dev/null +++ b/mmdet/core/bbox/samplers/combined_sampler.py @@ -0,0 +1,16 @@ +from .base_sampler import BaseSampler +from ..assign_sampling import build_sampler + + +class CombinedSampler(BaseSampler): + + def __init__(self, pos_sampler, neg_sampler, **kwargs): + super(CombinedSampler, self).__init__(**kwargs) + self.pos_sampler = build_sampler(pos_sampler, **kwargs) + self.neg_sampler = build_sampler(neg_sampler, **kwargs) + + def _sample_pos(self, **kwargs): + raise NotImplementedError + + def _sample_neg(self, **kwargs): + raise NotImplementedError diff --git a/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py b/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py new file mode 100644 index 0000000..bc829a2 --- /dev/null +++ b/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py @@ -0,0 +1,41 @@ +import numpy as np +import torch + +from .random_sampler import RandomSampler + + +class InstanceBalancedPosSampler(RandomSampler): + + def _sample_pos(self, assign_result, num_expected, **kwargs): + pos_inds = torch.nonzero(assign_result.gt_inds > 0) + if pos_inds.numel() != 0: + pos_inds = pos_inds.squeeze(1) + if pos_inds.numel() <= num_expected: + return pos_inds + else: + unique_gt_inds = assign_result.gt_inds[pos_inds].unique() + num_gts = len(unique_gt_inds) + num_per_gt = int(round(num_expected / float(num_gts)) + 1) + sampled_inds = [] + for i in unique_gt_inds: + inds = torch.nonzero(assign_result.gt_inds == i.item()) + if inds.numel() != 0: + inds = inds.squeeze(1) + else: + continue + if len(inds) > num_per_gt: + inds = self.random_choice(inds, num_per_gt) + sampled_inds.append(inds) + sampled_inds = torch.cat(sampled_inds) + if len(sampled_inds) < num_expected: + num_extra = num_expected - len(sampled_inds) + extra_inds = np.array( + list(set(pos_inds.cpu()) - set(sampled_inds.cpu()))) + if len(extra_inds) > num_extra: + 
extra_inds = self.random_choice(extra_inds, num_extra) + extra_inds = torch.from_numpy(extra_inds).to( + assign_result.gt_inds.device).long() + sampled_inds = torch.cat([sampled_inds, extra_inds]) + elif len(sampled_inds) > num_expected: + sampled_inds = self.random_choice(sampled_inds, num_expected) + return sampled_inds diff --git a/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py b/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py new file mode 100644 index 0000000..82537ee --- /dev/null +++ b/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py @@ -0,0 +1,62 @@ +import numpy as np +import torch + +from .random_sampler import RandomSampler + + +class IoUBalancedNegSampler(RandomSampler): + + def __init__(self, + num, + pos_fraction, + hard_thr=0.1, + hard_fraction=0.5, + **kwargs): + super(IoUBalancedNegSampler, self).__init__(num, pos_fraction, + **kwargs) + assert hard_thr > 0 + assert 0 < hard_fraction < 1 + self.hard_thr = hard_thr + self.hard_fraction = hard_fraction + + def _sample_neg(self, assign_result, num_expected, **kwargs): + neg_inds = torch.nonzero(assign_result.gt_inds == 0) + if neg_inds.numel() != 0: + neg_inds = neg_inds.squeeze(1) + if len(neg_inds) <= num_expected: + return neg_inds + else: + max_overlaps = assign_result.max_overlaps.cpu().numpy() + # balance sampling for negative samples + neg_set = set(neg_inds.cpu().numpy()) + easy_set = set( + np.where( + np.logical_and(max_overlaps >= 0, + max_overlaps < self.hard_thr))[0]) + hard_set = set(np.where(max_overlaps >= self.hard_thr)[0]) + easy_neg_inds = list(easy_set & neg_set) + hard_neg_inds = list(hard_set & neg_set) + + num_expected_hard = int(num_expected * self.hard_fraction) + if len(hard_neg_inds) > num_expected_hard: + sampled_hard_inds = self.random_choice(hard_neg_inds, + num_expected_hard) + else: + sampled_hard_inds = np.array(hard_neg_inds, dtype=np.int) + num_expected_easy = num_expected - len(sampled_hard_inds) + if len(easy_neg_inds) > num_expected_easy: + sampled_easy_inds = self.random_choice(easy_neg_inds, + num_expected_easy) + else: + sampled_easy_inds = np.array(easy_neg_inds, dtype=np.int) + sampled_inds = np.concatenate((sampled_easy_inds, + sampled_hard_inds)) + if len(sampled_inds) < num_expected: + num_extra = num_expected - len(sampled_inds) + extra_inds = np.array(list(neg_set - set(sampled_inds))) + if len(extra_inds) > num_extra: + extra_inds = self.random_choice(extra_inds, num_extra) + sampled_inds = np.concatenate((sampled_inds, extra_inds)) + sampled_inds = torch.from_numpy(sampled_inds).long().to( + assign_result.gt_inds.device) + return sampled_inds diff --git a/mmdet/core/bbox/samplers/ohem_sampler.py b/mmdet/core/bbox/samplers/ohem_sampler.py new file mode 100644 index 0000000..800a1c2 --- /dev/null +++ b/mmdet/core/bbox/samplers/ohem_sampler.py @@ -0,0 +1,73 @@ +import torch + +from .base_sampler import BaseSampler +from ..transforms import bbox2roi + + +class OHEMSampler(BaseSampler): + + def __init__(self, + num, + pos_fraction, + context, + neg_pos_ub=-1, + add_gt_as_proposals=True, + **kwargs): + super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub, + add_gt_as_proposals) + if not hasattr(context, 'num_stages'): + self.bbox_roi_extractor = context.bbox_roi_extractor + self.bbox_head = context.bbox_head + else: + self.bbox_roi_extractor = context.bbox_roi_extractor[ + context.current_stage] + self.bbox_head = context.bbox_head[context.current_stage] + + def hard_mining(self, inds, num_expected, bboxes, labels, feats): + with torch.no_grad(): + rois = 
bbox2roi([bboxes]) + bbox_feats = self.bbox_roi_extractor( + feats[:self.bbox_roi_extractor.num_inputs], rois) + cls_score, _ = self.bbox_head(bbox_feats) + loss = self.bbox_head.loss( + cls_score=cls_score, + bbox_pred=None, + labels=labels, + label_weights=cls_score.new_ones(cls_score.size(0)), + bbox_targets=None, + bbox_weights=None, + reduce=False)['loss_cls'] + _, topk_loss_inds = loss.topk(num_expected) + return inds[topk_loss_inds] + + def _sample_pos(self, + assign_result, + num_expected, + bboxes=None, + feats=None, + **kwargs): + # Sample some hard positive samples + pos_inds = torch.nonzero(assign_result.gt_inds > 0) + if pos_inds.numel() != 0: + pos_inds = pos_inds.squeeze(1) + if pos_inds.numel() <= num_expected: + return pos_inds + else: + return self.hard_mining(pos_inds, num_expected, bboxes[pos_inds], + assign_result.labels[pos_inds], feats) + + def _sample_neg(self, + assign_result, + num_expected, + bboxes=None, + feats=None, + **kwargs): + # Sample some hard negative samples + neg_inds = torch.nonzero(assign_result.gt_inds == 0) + if neg_inds.numel() != 0: + neg_inds = neg_inds.squeeze(1) + if len(neg_inds) <= num_expected: + return neg_inds + else: + return self.hard_mining(neg_inds, num_expected, bboxes[neg_inds], + assign_result.labels[neg_inds], feats) diff --git a/mmdet/core/bbox/samplers/pseudo_sampler.py b/mmdet/core/bbox/samplers/pseudo_sampler.py new file mode 100644 index 0000000..b4c2ea0 --- /dev/null +++ b/mmdet/core/bbox/samplers/pseudo_sampler.py @@ -0,0 +1,26 @@ +import torch + +from .base_sampler import BaseSampler +from .sampling_result import SamplingResult + + +class PseudoSampler(BaseSampler): + + def __init__(self, **kwargs): + pass + + def _sample_pos(self, **kwargs): + raise NotImplementedError + + def _sample_neg(self, **kwargs): + raise NotImplementedError + + def sample(self, assign_result, bboxes, gt_bboxes, **kwargs): + pos_inds = torch.nonzero( + assign_result.gt_inds > 0).squeeze(-1).unique() + neg_inds = torch.nonzero( + assign_result.gt_inds == 0).squeeze(-1).unique() + gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8) + sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, + assign_result, gt_flags) + return sampling_result diff --git a/mmdet/core/bbox/samplers/random_sampler.py b/mmdet/core/bbox/samplers/random_sampler.py new file mode 100644 index 0000000..0d02b27 --- /dev/null +++ b/mmdet/core/bbox/samplers/random_sampler.py @@ -0,0 +1,53 @@ +import numpy as np +import torch + +from .base_sampler import BaseSampler + + +class RandomSampler(BaseSampler): + + def __init__(self, + num, + pos_fraction, + neg_pos_ub=-1, + add_gt_as_proposals=True, + **kwargs): + super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub, + add_gt_as_proposals) + + @staticmethod + def random_choice(gallery, num): + """Random select some elements from the gallery. + + It seems that Pytorch's implementation is slower than numpy so we use + numpy to randperm the indices. 
+ """ + assert len(gallery) >= num + if isinstance(gallery, list): + gallery = np.array(gallery) + cands = np.arange(len(gallery)) + np.random.shuffle(cands) + rand_inds = cands[:num] + if not isinstance(gallery, np.ndarray): + rand_inds = torch.from_numpy(rand_inds).long().to(gallery.device) + return gallery[rand_inds] + + def _sample_pos(self, assign_result, num_expected, **kwargs): + """Randomly sample some positive samples.""" + pos_inds = torch.nonzero(assign_result.gt_inds > 0) + if pos_inds.numel() != 0: + pos_inds = pos_inds.squeeze(1) + if pos_inds.numel() <= num_expected: + return pos_inds + else: + return self.random_choice(pos_inds, num_expected) + + def _sample_neg(self, assign_result, num_expected, **kwargs): + """Randomly sample some negative samples.""" + neg_inds = torch.nonzero(assign_result.gt_inds == 0) + if neg_inds.numel() != 0: + neg_inds = neg_inds.squeeze(1) + if len(neg_inds) <= num_expected: + return neg_inds + else: + return self.random_choice(neg_inds, num_expected) diff --git a/mmdet/core/bbox/samplers/sampling_result.py b/mmdet/core/bbox/samplers/sampling_result.py new file mode 100644 index 0000000..696e650 --- /dev/null +++ b/mmdet/core/bbox/samplers/sampling_result.py @@ -0,0 +1,24 @@ +import torch + + +class SamplingResult(object): + + def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result, + gt_flags): + self.pos_inds = pos_inds + self.neg_inds = neg_inds + self.pos_bboxes = bboxes[pos_inds] + self.neg_bboxes = bboxes[neg_inds] + self.pos_is_gt = gt_flags[pos_inds] + + self.num_gts = gt_bboxes.shape[0] + self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1 + self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds, :] + if assign_result.labels is not None: + self.pos_gt_labels = assign_result.labels[pos_inds] + else: + self.pos_gt_labels = None + + @property + def bboxes(self): + return torch.cat([self.pos_bboxes, self.neg_bboxes]) diff --git a/mmdet/core/bbox/transforms.py b/mmdet/core/bbox/transforms.py new file mode 100644 index 0000000..580b9bd --- /dev/null +++ b/mmdet/core/bbox/transforms.py @@ -0,0 +1,180 @@ +import mmcv +import numpy as np +import torch + + +def bbox2delta(proposals, gt, means=[0, 0, 0, 0], stds=[1, 1, 1, 1]): + assert proposals.size() == gt.size() + + proposals = proposals.float() + gt = gt.float() + px = (proposals[..., 0] + proposals[..., 2]) * 0.5 + py = (proposals[..., 1] + proposals[..., 3]) * 0.5 + pw = proposals[..., 2] - proposals[..., 0] + 1.0 + ph = proposals[..., 3] - proposals[..., 1] + 1.0 + + gx = (gt[..., 0] + gt[..., 2]) * 0.5 + gy = (gt[..., 1] + gt[..., 3]) * 0.5 + gw = gt[..., 2] - gt[..., 0] + 1.0 + gh = gt[..., 3] - gt[..., 1] + 1.0 + + dx = (gx - px) / pw + dy = (gy - py) / ph + dw = torch.log(gw / pw) + dh = torch.log(gh / ph) + deltas = torch.stack([dx, dy, dw, dh], dim=-1) + + means = deltas.new_tensor(means).unsqueeze(0) + stds = deltas.new_tensor(stds).unsqueeze(0) + deltas = deltas.sub_(means).div_(stds) + + return deltas + + +def delta2bbox(rois, + deltas, + means=[0, 0, 0, 0], + stds=[1, 1, 1, 1], + max_shape=None, + wh_ratio_clip=16 / 1000): + means = deltas.new_tensor(means).repeat(1, deltas.size(1) // 4) + stds = deltas.new_tensor(stds).repeat(1, deltas.size(1) // 4) + denorm_deltas = deltas * stds + means + dx = denorm_deltas[:, 0::4] + dy = denorm_deltas[:, 1::4] + dw = denorm_deltas[:, 2::4] + dh = denorm_deltas[:, 3::4] + max_ratio = np.abs(np.log(wh_ratio_clip)) + dw = dw.clamp(min=-max_ratio, max=max_ratio) + dh = dh.clamp(min=-max_ratio, max=max_ratio) + px = 
((rois[:, 0] + rois[:, 2]) * 0.5).unsqueeze(1).expand_as(dx) + py = ((rois[:, 1] + rois[:, 3]) * 0.5).unsqueeze(1).expand_as(dy) + pw = (rois[:, 2] - rois[:, 0] + 1.0).unsqueeze(1).expand_as(dw) + ph = (rois[:, 3] - rois[:, 1] + 1.0).unsqueeze(1).expand_as(dh) + gw = pw * dw.exp() + gh = ph * dh.exp() + gx = torch.addcmul(px, 1, pw, dx) # gx = px + pw * dx + gy = torch.addcmul(py, 1, ph, dy) # gy = py + ph * dy + x1 = gx - gw * 0.5 + 0.5 + y1 = gy - gh * 0.5 + 0.5 + x2 = gx + gw * 0.5 - 0.5 + y2 = gy + gh * 0.5 - 0.5 + if max_shape is not None: + x1 = x1.clamp(min=0, max=max_shape[1] - 1) + y1 = y1.clamp(min=0, max=max_shape[0] - 1) + x2 = x2.clamp(min=0, max=max_shape[1] - 1) + y2 = y2.clamp(min=0, max=max_shape[0] - 1) + bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view_as(deltas) + return bboxes + + +def bbox_flip(bboxes, img_shape): + """Flip bboxes horizontally. + + Args: + bboxes(Tensor or ndarray): Shape (..., 4*k) + img_shape(tuple): Image shape. + + Returns: + Same type as `bboxes`: Flipped bboxes. + """ + if isinstance(bboxes, torch.Tensor): + assert bboxes.shape[-1] % 4 == 0 + flipped = bboxes.clone() + flipped[:, 0::4] = img_shape[1] - bboxes[:, 2::4] - 1 + flipped[:, 2::4] = img_shape[1] - bboxes[:, 0::4] - 1 + return flipped + elif isinstance(bboxes, np.ndarray): + return mmcv.bbox_flip(bboxes, img_shape) + + +def bbox_mapping(bboxes, img_shape, scale_factor, flip): + """Map bboxes from the original image scale to testing scale""" + new_bboxes = bboxes * scale_factor + if flip: + new_bboxes = bbox_flip(new_bboxes, img_shape) + return new_bboxes + + +def bbox_mapping_back(bboxes, img_shape, scale_factor, flip): + """Map bboxes from testing scale to original image scale""" + new_bboxes = bbox_flip(bboxes, img_shape) if flip else bboxes + new_bboxes = new_bboxes / scale_factor + return new_bboxes + + +def bbox2roi(bbox_list): + """Convert a list of bboxes to roi format. + + Args: + bbox_list (list[Tensor]): a list of bboxes corresponding to a batch + of images. + + Returns: + Tensor: shape (n, 5), [batch_ind, x1, y1, x2, y2] + """ + rois_list = [] + for img_id, bboxes in enumerate(bbox_list): + if bboxes.size(0) > 0: + img_inds = bboxes.new_full((bboxes.size(0), 1), img_id) + rois = torch.cat([img_inds, bboxes[:, :4]], dim=-1) + else: + rois = bboxes.new_zeros((0, 5)) + rois_list.append(rois) + rois = torch.cat(rois_list, 0) + return rois + + +def roi2bbox(rois): + bbox_list = [] + img_ids = torch.unique(rois[:, 0].cpu(), sorted=True) + for img_id in img_ids: + inds = (rois[:, 0] == img_id.item()) + bbox = rois[inds, 1:] + bbox_list.append(bbox) + return bbox_list + + +def bbox2result(bboxes, labels, num_classes): + """Convert detection results to a list of numpy arrays. + + Args: + bboxes (Tensor): shape (n, 5) + labels (Tensor): shape (n, ) + num_classes (int): class number, including background class + + Returns: + list(ndarray): bbox results of each class + """ + if bboxes.shape[0] == 0: + return [ + np.zeros((0, 5), dtype=np.float32) for i in range(num_classes - 1) + ] + else: + bboxes = bboxes.cpu().numpy() + labels = labels.cpu().numpy() + return [bboxes[labels == i, :] for i in range(num_classes - 1)] + + +def distance2bbox(points, distance, max_shape=None): + """Decode distance prediction to bounding box. + + Args: + points (Tensor): Shape (n, 2), [x, y]. + distance (Tensor): Distance from the given point to 4 + boundaries (left, top, right, bottom). + max_shape (tuple): Shape of the image. + + Returns: + Tensor: Decoded bboxes. 
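
    Example (illustrative sketch, not part of the original patch):
        >>> points = torch.Tensor([[5., 5.]])
        >>> distance = torch.Tensor([[1., 2., 3., 4.]])
        >>> distance2bbox(points, distance)
        tensor([[4., 3., 8., 9.]])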
+ """ + x1 = points[:, 0] - distance[:, 0] + y1 = points[:, 1] - distance[:, 1] + x2 = points[:, 0] + distance[:, 2] + y2 = points[:, 1] + distance[:, 3] + if max_shape is not None: + x1 = x1.clamp(min=0, max=max_shape[1] - 1) + y1 = y1.clamp(min=0, max=max_shape[0] - 1) + x2 = x2.clamp(min=0, max=max_shape[1] - 1) + y2 = y2.clamp(min=0, max=max_shape[0] - 1) + return torch.stack([x1, y1, x2, y2], -1) diff --git a/mmdet/core/evaluation/__init__.py b/mmdet/core/evaluation/__init__.py new file mode 100644 index 0000000..4585c23 --- /dev/null +++ b/mmdet/core/evaluation/__init__.py @@ -0,0 +1,18 @@ +from .class_names import (voc_classes, imagenet_det_classes, + imagenet_vid_classes, coco_classes, dataset_aliases, + get_classes) +from .coco_utils import coco_eval, fast_eval_recall, results2json +from .eval_hooks import (DistEvalHook, DistEvalmAPHook, CocoDistEvalRecallHook, + CocoDistEvalmAPHook) +from .mean_ap import average_precision, eval_map, print_map_summary +from .recall import (eval_recalls, print_recall_summary, plot_num_recall, + plot_iou_recall) + +__all__ = [ + 'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes', + 'coco_classes', 'dataset_aliases', 'get_classes', 'coco_eval', + 'fast_eval_recall', 'results2json', 'DistEvalHook', 'DistEvalmAPHook', + 'CocoDistEvalRecallHook', 'CocoDistEvalmAPHook', 'average_precision', + 'eval_map', 'print_map_summary', 'eval_recalls', 'print_recall_summary', + 'plot_num_recall', 'plot_iou_recall' +] diff --git a/mmdet/core/evaluation/bbox_overlaps.py b/mmdet/core/evaluation/bbox_overlaps.py new file mode 100644 index 0000000..ad4c705 --- /dev/null +++ b/mmdet/core/evaluation/bbox_overlaps.py @@ -0,0 +1,49 @@ +import numpy as np + + +def bbox_overlaps(bboxes1, bboxes2, mode='iou'): + """Calculate the ious between each bbox of bboxes1 and bboxes2. 
+ + Args: + bboxes1(ndarray): shape (n, 4) + bboxes2(ndarray): shape (k, 4) + mode(str): iou (intersection over union) or iof (intersection + over foreground) + + Returns: + ious(ndarray): shape (n, k) + """ + + assert mode in ['iou', 'iof'] + + bboxes1 = bboxes1.astype(np.float32) + bboxes2 = bboxes2.astype(np.float32) + rows = bboxes1.shape[0] + cols = bboxes2.shape[0] + ious = np.zeros((rows, cols), dtype=np.float32) + if rows * cols == 0: + return ious + exchange = False + if bboxes1.shape[0] > bboxes2.shape[0]: + bboxes1, bboxes2 = bboxes2, bboxes1 + ious = np.zeros((cols, rows), dtype=np.float32) + exchange = True + area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * ( + bboxes1[:, 3] - bboxes1[:, 1] + 1) + area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * ( + bboxes2[:, 3] - bboxes2[:, 1] + 1) + for i in range(bboxes1.shape[0]): + x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0]) + y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1]) + x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2]) + y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3]) + overlap = np.maximum(x_end - x_start + 1, 0) * np.maximum( + y_end - y_start + 1, 0) + if mode == 'iou': + union = area1[i] + area2 - overlap + else: + union = area1[i] if not exchange else area2 + ious[i, :] = overlap / union + if exchange: + ious = ious.T + return ious diff --git a/mmdet/core/evaluation/class_names.py b/mmdet/core/evaluation/class_names.py new file mode 100644 index 0000000..a8d0a3b --- /dev/null +++ b/mmdet/core/evaluation/class_names.py @@ -0,0 +1,103 @@ +import mmcv + + +def voc_classes(): + return [ + 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', + 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', + 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor' + ] + + +def imagenet_det_classes(): + return [ + 'accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo', + 'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam', + 'banana', 'band_aid', 'banjo', 'baseball', 'basketball', 'bathing_cap', + 'beaker', 'bear', 'bee', 'bell_pepper', 'bench', 'bicycle', 'binder', + 'bird', 'bookshelf', 'bow_tie', 'bow', 'bowl', 'brassiere', 'burrito', + 'bus', 'butterfly', 'camel', 'can_opener', 'car', 'cart', 'cattle', + 'cello', 'centipede', 'chain_saw', 'chair', 'chime', 'cocktail_shaker', + 'coffee_maker', 'computer_keyboard', 'computer_mouse', 'corkscrew', + 'cream', 'croquet_ball', 'crutch', 'cucumber', 'cup_or_mug', 'diaper', + 'digital_clock', 'dishwasher', 'dog', 'domestic_cat', 'dragonfly', + 'drum', 'dumbbell', 'electric_fan', 'elephant', 'face_powder', 'fig', + 'filing_cabinet', 'flower_pot', 'flute', 'fox', 'french_horn', 'frog', + 'frying_pan', 'giant_panda', 'goldfish', 'golf_ball', 'golfcart', + 'guacamole', 'guitar', 'hair_dryer', 'hair_spray', 'hamburger', + 'hammer', 'hamster', 'harmonica', 'harp', 'hat_with_a_wide_brim', + 'head_cabbage', 'helmet', 'hippopotamus', 'horizontal_bar', 'horse', + 'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle', + 'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard', + 'lobster', 'maillot', 'maraca', 'microphone', 'microwave', 'milk_can', + 'miniskirt', 'monkey', 'motorcycle', 'mushroom', 'nail', 'neck_brace', + 'oboe', 'orange', 'otter', 'pencil_box', 'pencil_sharpener', 'perfume', + 'person', 'piano', 'pineapple', 'ping-pong_ball', 'pitcher', 'pizza', + 'plastic_bag', 'plate_rack', 'pomegranate', 'popsicle', 'porcupine', + 'power_drill', 'pretzel', 'printer', 'puck', 'punching_bag', 'purse', + 'rabbit', 'racket', 
'ray', 'red_panda', 'refrigerator', + 'remote_control', 'rubber_eraser', 'rugby_ball', 'ruler', + 'salt_or_pepper_shaker', 'saxophone', 'scorpion', 'screwdriver', + 'seal', 'sheep', 'ski', 'skunk', 'snail', 'snake', 'snowmobile', + 'snowplow', 'soap_dispenser', 'soccer_ball', 'sofa', 'spatula', + 'squirrel', 'starfish', 'stethoscope', 'stove', 'strainer', + 'strawberry', 'stretcher', 'sunglasses', 'swimming_trunks', 'swine', + 'syringe', 'table', 'tape_player', 'tennis_ball', 'tick', 'tie', + 'tiger', 'toaster', 'traffic_light', 'train', 'trombone', 'trumpet', + 'turtle', 'tv_or_monitor', 'unicycle', 'vacuum', 'violin', + 'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft', + 'whale', 'wine_bottle', 'zebra' + ] + + +def imagenet_vid_classes(): + return [ + 'airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus', 'car', + 'cattle', 'dog', 'domestic_cat', 'elephant', 'fox', 'giant_panda', + 'hamster', 'horse', 'lion', 'lizard', 'monkey', 'motorcycle', 'rabbit', + 'red_panda', 'sheep', 'snake', 'squirrel', 'tiger', 'train', 'turtle', + 'watercraft', 'whale', 'zebra' + ] + + +def coco_classes(): + return [ + 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', + 'truck', 'boat', 'traffic_light', 'fire_hydrant', 'stop_sign', + 'parking_meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', + 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', + 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', + 'sports_ball', 'kite', 'baseball_bat', 'baseball_glove', 'skateboard', + 'surfboard', 'tennis_racket', 'bottle', 'wine_glass', 'cup', 'fork', + 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', + 'broccoli', 'carrot', 'hot_dog', 'pizza', 'donut', 'cake', 'chair', + 'couch', 'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', + 'laptop', 'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave', + 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', + 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush' + ] + + +dataset_aliases = { + 'voc': ['voc', 'pascal_voc', 'voc07', 'voc12'], + 'imagenet_det': ['det', 'imagenet_det', 'ilsvrc_det'], + 'imagenet_vid': ['vid', 'imagenet_vid', 'ilsvrc_vid'], + 'coco': ['coco', 'mscoco', 'ms_coco'] +} + + +def get_classes(dataset): + """Get class names of a dataset.""" + alias2name = {} + for name, aliases in dataset_aliases.items(): + for alias in aliases: + alias2name[alias] = name + + if mmcv.is_str(dataset): + if dataset in alias2name: + labels = eval(alias2name[dataset] + '_classes()') + else: + raise ValueError('Unrecognized dataset: {}'.format(dataset)) + else: + raise TypeError('dataset must a str, but got {}'.format(type(dataset))) + return labels diff --git a/mmdet/core/evaluation/coco_utils.py b/mmdet/core/evaluation/coco_utils.py new file mode 100644 index 0000000..0ed056b --- /dev/null +++ b/mmdet/core/evaluation/coco_utils.py @@ -0,0 +1,149 @@ +import mmcv +import numpy as np +from pycocotools.coco import COCO +from pycocotools.cocoeval import COCOeval + +from .recall import eval_recalls + + +def coco_eval(result_file, result_types, coco, max_dets=(100, 300, 1000)): + for res_type in result_types: + assert res_type in [ + 'proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints' + ] + + if mmcv.is_str(coco): + coco = COCO(coco) + assert isinstance(coco, COCO) + + if result_types == ['proposal_fast']: + ar = fast_eval_recall(result_file, coco, np.array(max_dets)) + for i, num in enumerate(max_dets): + print('AR@{}\t= {:.4f}'.format(num, ar[i])) + return + + 
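+    # Every other result type is evaluated through the official COCOeval API,
+    # so result_file is expected to be a COCO-format json, e.g. as written by
+    # results2json() below.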
assert result_file.endswith('.json') + coco_dets = coco.loadRes(result_file) + + img_ids = coco.getImgIds() + for res_type in result_types: + iou_type = 'bbox' if res_type == 'proposal' else res_type + cocoEval = COCOeval(coco, coco_dets, iou_type) + cocoEval.params.imgIds = img_ids + if res_type == 'proposal': + cocoEval.params.useCats = 0 + cocoEval.params.maxDets = list(max_dets) + cocoEval.evaluate() + cocoEval.accumulate() + cocoEval.summarize() + + +def fast_eval_recall(results, + coco, + max_dets, + iou_thrs=np.arange(0.5, 0.96, 0.05)): + if mmcv.is_str(results): + assert results.endswith('.pkl') + results = mmcv.load(results) + elif not isinstance(results, list): + raise TypeError( + 'results must be a list of numpy arrays or a filename, not {}'. + format(type(results))) + + gt_bboxes = [] + img_ids = coco.getImgIds() + for i in range(len(img_ids)): + ann_ids = coco.getAnnIds(imgIds=img_ids[i]) + ann_info = coco.loadAnns(ann_ids) + if len(ann_info) == 0: + gt_bboxes.append(np.zeros((0, 4))) + continue + bboxes = [] + for ann in ann_info: + if ann.get('ignore', False) or ann['iscrowd']: + continue + x1, y1, w, h = ann['bbox'] + bboxes.append([x1, y1, x1 + w - 1, y1 + h - 1]) + bboxes = np.array(bboxes, dtype=np.float32) + if bboxes.shape[0] == 0: + bboxes = np.zeros((0, 4)) + gt_bboxes.append(bboxes) + + recalls = eval_recalls( + gt_bboxes, results, max_dets, iou_thrs, print_summary=False) + ar = recalls.mean(axis=1) + return ar + + +def xyxy2xywh(bbox): + _bbox = bbox.tolist() + return [ + _bbox[0], + _bbox[1], + _bbox[2] - _bbox[0] + 1, + _bbox[3] - _bbox[1] + 1, + ] + + +def proposal2json(dataset, results): + json_results = [] + for idx in range(len(dataset)): + img_id = dataset.img_ids[idx] + bboxes = results[idx] + for i in range(bboxes.shape[0]): + data = dict() + data['image_id'] = img_id + data['bbox'] = xyxy2xywh(bboxes[i]) + data['score'] = float(bboxes[i][4]) + data['category_id'] = 1 + json_results.append(data) + return json_results + + +def det2json(dataset, results): + json_results = [] + for idx in range(len(dataset)): + img_id = dataset.img_ids[idx] + result = results[idx] + for label in range(len(result)): + bboxes = result[label] + for i in range(bboxes.shape[0]): + data = dict() + data['image_id'] = img_id + data['bbox'] = xyxy2xywh(bboxes[i]) + data['score'] = float(bboxes[i][4]) + data['category_id'] = dataset.cat_ids[label] + json_results.append(data) + return json_results + + +def segm2json(dataset, results): + json_results = [] + for idx in range(len(dataset)): + img_id = dataset.img_ids[idx] + det, seg = results[idx] + for label in range(len(det)): + bboxes = det[label] + segms = seg[label] + for i in range(bboxes.shape[0]): + data = dict() + data['image_id'] = img_id + data['bbox'] = xyxy2xywh(bboxes[i]) + data['score'] = float(bboxes[i][4]) + data['category_id'] = dataset.cat_ids[label] + segms[i]['counts'] = segms[i]['counts'].decode() + data['segmentation'] = segms[i] + json_results.append(data) + return json_results + + +def results2json(dataset, results, out_file): + if isinstance(results[0], list): + json_results = det2json(dataset, results) + elif isinstance(results[0], tuple): + json_results = segm2json(dataset, results) + elif isinstance(results[0], np.ndarray): + json_results = proposal2json(dataset, results) + else: + raise TypeError('invalid type of results') + mmcv.dump(json_results, out_file) diff --git a/mmdet/core/evaluation/eval_hooks.py b/mmdet/core/evaluation/eval_hooks.py new file mode 100644 index 0000000..fb12578 --- /dev/null +++ 
b/mmdet/core/evaluation/eval_hooks.py @@ -0,0 +1,162 @@ +import os +import os.path as osp + +import mmcv +import numpy as np +import torch +import torch.distributed as dist +from mmcv.runner import Hook, obj_from_dict +from mmcv.parallel import scatter, collate +from pycocotools.cocoeval import COCOeval +from torch.utils.data import Dataset + +from .coco_utils import results2json, fast_eval_recall +from .mean_ap import eval_map +from mmdet import datasets + + +class DistEvalHook(Hook): + + def __init__(self, dataset, interval=1): + if isinstance(dataset, Dataset): + self.dataset = dataset + elif isinstance(dataset, dict): + self.dataset = obj_from_dict(dataset, datasets, + {'test_mode': True}) + else: + raise TypeError( + 'dataset must be a Dataset object or a dict, not {}'.format( + type(dataset))) + self.interval = interval + + def after_train_epoch(self, runner): + if not self.every_n_epochs(runner, self.interval): + return + runner.model.eval() + results = [None for _ in range(len(self.dataset))] + if runner.rank == 0: + prog_bar = mmcv.ProgressBar(len(self.dataset)) + for idx in range(runner.rank, len(self.dataset), runner.world_size): + data = self.dataset[idx] + data_gpu = scatter( + collate([data], samples_per_gpu=1), + [torch.cuda.current_device()])[0] + + # compute output + with torch.no_grad(): + result = runner.model( + return_loss=False, rescale=True, **data_gpu) + results[idx] = result + + batch_size = runner.world_size + if runner.rank == 0: + for _ in range(batch_size): + prog_bar.update() + + if runner.rank == 0: + print('\n') + dist.barrier() + for i in range(1, runner.world_size): + tmp_file = osp.join(runner.work_dir, 'temp_{}.pkl'.format(i)) + tmp_results = mmcv.load(tmp_file) + for idx in range(i, len(results), runner.world_size): + results[idx] = tmp_results[idx] + os.remove(tmp_file) + self.evaluate(runner, results) + else: + tmp_file = osp.join(runner.work_dir, + 'temp_{}.pkl'.format(runner.rank)) + mmcv.dump(results, tmp_file) + dist.barrier() + dist.barrier() + + def evaluate(self): + raise NotImplementedError + + +class DistEvalmAPHook(DistEvalHook): + + def evaluate(self, runner, results): + gt_bboxes = [] + gt_labels = [] + gt_ignore = [] if self.dataset.with_crowd else None + for i in range(len(self.dataset)): + ann = self.dataset.get_ann_info(i) + bboxes = ann['bboxes'] + labels = ann['labels'] + if gt_ignore is not None: + ignore = np.concatenate([ + np.zeros(bboxes.shape[0], dtype=np.bool), + np.ones(ann['bboxes_ignore'].shape[0], dtype=np.bool) + ]) + gt_ignore.append(ignore) + bboxes = np.vstack([bboxes, ann['bboxes_ignore']]) + labels = np.concatenate([labels, ann['labels_ignore']]) + gt_bboxes.append(bboxes) + gt_labels.append(labels) + # If the dataset is VOC2007, then use 11 points mAP evaluation. 
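+        # eval_map uses the 11-point interpolated AP only when the dataset
+        # name is 'voc07'; all other datasets use the area under the PR curve.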
+ if hasattr(self.dataset, 'year') and self.dataset.year == 2007: + ds_name = 'voc07' + else: + ds_name = self.dataset.CLASSES + mean_ap, eval_results = eval_map( + results, + gt_bboxes, + gt_labels, + gt_ignore=gt_ignore, + scale_ranges=None, + iou_thr=0.5, + dataset=ds_name, + print_summary=True) + runner.log_buffer.output['mAP'] = mean_ap + runner.log_buffer.ready = True + + +class CocoDistEvalRecallHook(DistEvalHook): + + def __init__(self, + dataset, + proposal_nums=(100, 300, 1000), + iou_thrs=np.arange(0.5, 0.96, 0.05)): + super(CocoDistEvalRecallHook, self).__init__(dataset) + self.proposal_nums = np.array(proposal_nums, dtype=np.int32) + self.iou_thrs = np.array(iou_thrs, dtype=np.float32) + + def evaluate(self, runner, results): + # the official coco evaluation is too slow, here we use our own + # implementation instead, which may get slightly different results + ar = fast_eval_recall(results, self.dataset.coco, self.proposal_nums, + self.iou_thrs) + for i, num in enumerate(self.proposal_nums): + runner.log_buffer.output['AR@{}'.format(num)] = ar[i] + runner.log_buffer.ready = True + + +class CocoDistEvalmAPHook(DistEvalHook): + + def evaluate(self, runner, results): + tmp_file = osp.join(runner.work_dir, 'temp_0.json') + results2json(self.dataset, results, tmp_file) + + res_types = ['bbox', + 'segm'] if runner.model.module.with_mask else ['bbox'] + cocoGt = self.dataset.coco + cocoDt = cocoGt.loadRes(tmp_file) + imgIds = cocoGt.getImgIds() + for res_type in res_types: + iou_type = res_type + cocoEval = COCOeval(cocoGt, cocoDt, iou_type) + cocoEval.params.imgIds = imgIds + cocoEval.evaluate() + cocoEval.accumulate() + cocoEval.summarize() + metrics = ['mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'] + for i in range(len(metrics)): + key = '{}_{}'.format(res_type, metrics[i]) + val = float('{:.3f}'.format(cocoEval.stats[i])) + runner.log_buffer.output[key] = val + runner.log_buffer.output['{}_mAP_copypaste'.format(res_type)] = ( + '{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} ' + '{ap[4]:.3f} {ap[5]:.3f}').format(ap=cocoEval.stats[:6]) + runner.log_buffer.ready = True + os.remove(tmp_file) diff --git a/mmdet/core/evaluation/mean_ap.py b/mmdet/core/evaluation/mean_ap.py new file mode 100644 index 0000000..8f0474a --- /dev/null +++ b/mmdet/core/evaluation/mean_ap.py @@ -0,0 +1,378 @@ +import mmcv +import numpy as np +from terminaltables import AsciiTable + +from .bbox_overlaps import bbox_overlaps +from .class_names import get_classes + + +def average_precision(recalls, precisions, mode='area'): + """Calculate average precision (for single or multiple scales). 
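+
+    In 'area' mode the precision curve is first made monotonically
+    non-increasing and the area under it is accumulated; in '11points' mode
+    the maximum precision at recall >= 0.0, 0.1, ..., 1.0 is averaged.
+
+    Illustrative example (the value is approximate):
+        >>> recalls = np.array([0.2, 0.4, 1.0])
+        >>> precisions = np.array([1.0, 0.5, 0.4])
+        >>> ap = average_precision(recalls, precisions)  # ~0.54 in 'area' mode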
+ + Args: + recalls (ndarray): shape (num_scales, num_dets) or (num_dets, ) + precisions (ndarray): shape (num_scales, num_dets) or (num_dets, ) + mode (str): 'area' or '11points', 'area' means calculating the area + under precision-recall curve, '11points' means calculating + the average precision of recalls at [0, 0.1, ..., 1] + + Returns: + float or ndarray: calculated average precision + """ + no_scale = False + if recalls.ndim == 1: + no_scale = True + recalls = recalls[np.newaxis, :] + precisions = precisions[np.newaxis, :] + assert recalls.shape == precisions.shape and recalls.ndim == 2 + num_scales = recalls.shape[0] + ap = np.zeros(num_scales, dtype=np.float32) + if mode == 'area': + zeros = np.zeros((num_scales, 1), dtype=recalls.dtype) + ones = np.ones((num_scales, 1), dtype=recalls.dtype) + mrec = np.hstack((zeros, recalls, ones)) + mpre = np.hstack((zeros, precisions, zeros)) + for i in range(mpre.shape[1] - 1, 0, -1): + mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i]) + for i in range(num_scales): + ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0] + ap[i] = np.sum( + (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1]) + elif mode == '11points': + for i in range(num_scales): + for thr in np.arange(0, 1 + 1e-3, 0.1): + precs = precisions[i, recalls[i, :] >= thr] + prec = precs.max() if precs.size > 0 else 0 + ap[i] += prec + ap /= 11 + else: + raise ValueError( + 'Unrecognized mode, only "area" and "11points" are supported') + if no_scale: + ap = ap[0] + return ap + + +def tpfp_imagenet(det_bboxes, + gt_bboxes, + gt_ignore, + default_iou_thr, + area_ranges=None): + """Check if detected bboxes are true positive or false positive. + + Args: + det_bbox (ndarray): the detected bbox + gt_bboxes (ndarray): ground truth bboxes of this image + gt_ignore (ndarray): indicate if gts are ignored for evaluation or not + default_iou_thr (float): the iou thresholds for medium and large bboxes + area_ranges (list or None): gt bbox area ranges + + Returns: + tuple: two arrays (tp, fp) whose elements are 0 and 1 + """ + num_dets = det_bboxes.shape[0] + num_gts = gt_bboxes.shape[0] + if area_ranges is None: + area_ranges = [(None, None)] + num_scales = len(area_ranges) + # tp and fp are of shape (num_scales, num_gts), each row is tp or fp + # of a certain scale. + tp = np.zeros((num_scales, num_dets), dtype=np.float32) + fp = np.zeros((num_scales, num_dets), dtype=np.float32) + if gt_bboxes.shape[0] == 0: + if area_ranges == [(None, None)]: + fp[...] 
= 1 + else: + det_areas = (det_bboxes[:, 2] - det_bboxes[:, 0] + 1) * ( + det_bboxes[:, 3] - det_bboxes[:, 1] + 1) + for i, (min_area, max_area) in enumerate(area_ranges): + fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1 + return tp, fp + ious = bbox_overlaps(det_bboxes, gt_bboxes - 1) + gt_w = gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1 + gt_h = gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1 + iou_thrs = np.minimum((gt_w * gt_h) / ((gt_w + 10.0) * (gt_h + 10.0)), + default_iou_thr) + # sort all detections by scores in descending order + sort_inds = np.argsort(-det_bboxes[:, -1]) + for k, (min_area, max_area) in enumerate(area_ranges): + gt_covered = np.zeros(num_gts, dtype=bool) + # if no area range is specified, gt_area_ignore is all False + if min_area is None: + gt_area_ignore = np.zeros_like(gt_ignore, dtype=bool) + else: + gt_areas = gt_w * gt_h + gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area) + for i in sort_inds: + max_iou = -1 + matched_gt = -1 + # find best overlapped available gt + for j in range(num_gts): + # different from PASCAL VOC: allow finding other gts if the + # best overlaped ones are already matched by other det bboxes + if gt_covered[j]: + continue + elif ious[i, j] >= iou_thrs[j] and ious[i, j] > max_iou: + max_iou = ious[i, j] + matched_gt = j + # there are 4 cases for a det bbox: + # 1. it matches a gt, tp = 1, fp = 0 + # 2. it matches an ignored gt, tp = 0, fp = 0 + # 3. it matches no gt and within area range, tp = 0, fp = 1 + # 4. it matches no gt but is beyond area range, tp = 0, fp = 0 + if matched_gt >= 0: + gt_covered[matched_gt] = 1 + if not (gt_ignore[matched_gt] or gt_area_ignore[matched_gt]): + tp[k, i] = 1 + elif min_area is None: + fp[k, i] = 1 + else: + bbox = det_bboxes[i, :4] + area = (bbox[2] - bbox[0] + 1) * (bbox[3] - bbox[1] + 1) + if area >= min_area and area < max_area: + fp[k, i] = 1 + return tp, fp + + +def tpfp_default(det_bboxes, gt_bboxes, gt_ignore, iou_thr, area_ranges=None): + """Check if detected bboxes are true positive or false positive. + + Args: + det_bbox (ndarray): the detected bbox + gt_bboxes (ndarray): ground truth bboxes of this image + gt_ignore (ndarray): indicate if gts are ignored for evaluation or not + iou_thr (float): the iou thresholds + + Returns: + tuple: (tp, fp), two arrays whose elements are 0 and 1 + """ + num_dets = det_bboxes.shape[0] + num_gts = gt_bboxes.shape[0] + if area_ranges is None: + area_ranges = [(None, None)] + num_scales = len(area_ranges) + # tp and fp are of shape (num_scales, num_gts), each row is tp or fp of + # a certain scale + tp = np.zeros((num_scales, num_dets), dtype=np.float32) + fp = np.zeros((num_scales, num_dets), dtype=np.float32) + # if there is no gt bboxes in this image, then all det bboxes + # within area range are false positives + if gt_bboxes.shape[0] == 0: + if area_ranges == [(None, None)]: + fp[...] 
= 1 + else: + det_areas = (det_bboxes[:, 2] - det_bboxes[:, 0] + 1) * ( + det_bboxes[:, 3] - det_bboxes[:, 1] + 1) + for i, (min_area, max_area) in enumerate(area_ranges): + fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1 + return tp, fp + ious = bbox_overlaps(det_bboxes, gt_bboxes) + ious_max = ious.max(axis=1) + ious_argmax = ious.argmax(axis=1) + sort_inds = np.argsort(-det_bboxes[:, -1]) + for k, (min_area, max_area) in enumerate(area_ranges): + gt_covered = np.zeros(num_gts, dtype=bool) + # if no area range is specified, gt_area_ignore is all False + if min_area is None: + gt_area_ignore = np.zeros_like(gt_ignore, dtype=bool) + else: + gt_areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) * ( + gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1) + gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area) + for i in sort_inds: + if ious_max[i] >= iou_thr: + matched_gt = ious_argmax[i] + if not (gt_ignore[matched_gt] or gt_area_ignore[matched_gt]): + if not gt_covered[matched_gt]: + gt_covered[matched_gt] = True + tp[k, i] = 1 + else: + fp[k, i] = 1 + # otherwise ignore this detected bbox, tp = 0, fp = 0 + elif min_area is None: + fp[k, i] = 1 + else: + bbox = det_bboxes[i, :4] + area = (bbox[2] - bbox[0] + 1) * (bbox[3] - bbox[1] + 1) + if area >= min_area and area < max_area: + fp[k, i] = 1 + return tp, fp + + +def get_cls_results(det_results, gt_bboxes, gt_labels, gt_ignore, class_id): + """Get det results and gt information of a certain class.""" + cls_dets = [det[class_id] + for det in det_results] # det bboxes of this class + cls_gts = [] # gt bboxes of this class + cls_gt_ignore = [] + for j in range(len(gt_bboxes)): + gt_bbox = gt_bboxes[j] + cls_inds = (gt_labels[j] == class_id + 1) + cls_gt = gt_bbox[cls_inds, :] if gt_bbox.shape[0] > 0 else gt_bbox + cls_gts.append(cls_gt) + if gt_ignore is None: + cls_gt_ignore.append(np.zeros(cls_gt.shape[0], dtype=np.int32)) + else: + cls_gt_ignore.append(gt_ignore[j][cls_inds]) + return cls_dets, cls_gts, cls_gt_ignore + + +def eval_map(det_results, + gt_bboxes, + gt_labels, + gt_ignore=None, + scale_ranges=None, + iou_thr=0.5, + dataset=None, + print_summary=True): + """Evaluate mAP of a dataset. + + Args: + det_results (list): a list of list, [[cls1_det, cls2_det, ...], ...] + gt_bboxes (list): ground truth bboxes of each image, a list of K*4 + array. + gt_labels (list): ground truth labels of each image, a list of K array + gt_ignore (list): gt ignore indicators of each image, a list of K array + scale_ranges (list, optional): [(min1, max1), (min2, max2), ...] + iou_thr (float): IoU threshold + dataset (None or str or list): dataset name or dataset classes, there + are minor differences in metrics for different datsets, e.g. + "voc07", "imagenet_det", etc. 
+ print_summary (bool): whether to print the mAP summary + + Returns: + tuple: (mAP, [dict, dict, ...]) + """ + assert len(det_results) == len(gt_bboxes) == len(gt_labels) + if gt_ignore is not None: + assert len(gt_ignore) == len(gt_labels) + for i in range(len(gt_ignore)): + assert len(gt_labels[i]) == len(gt_ignore[i]) + area_ranges = ([(rg[0]**2, rg[1]**2) for rg in scale_ranges] + if scale_ranges is not None else None) + num_scales = len(scale_ranges) if scale_ranges is not None else 1 + eval_results = [] + num_classes = len(det_results[0]) # positive class num + gt_labels = [ + label if label.ndim == 1 else label[:, 0] for label in gt_labels + ] + for i in range(num_classes): + # get gt and det bboxes of this class + cls_dets, cls_gts, cls_gt_ignore = get_cls_results( + det_results, gt_bboxes, gt_labels, gt_ignore, i) + # calculate tp and fp for each image + tpfp_func = ( + tpfp_imagenet if dataset in ['det', 'vid'] else tpfp_default) + tpfp = [ + tpfp_func(cls_dets[j], cls_gts[j], cls_gt_ignore[j], iou_thr, + area_ranges) for j in range(len(cls_dets)) + ] + tp, fp = tuple(zip(*tpfp)) + # calculate gt number of each scale, gts ignored or beyond scale + # are not counted + num_gts = np.zeros(num_scales, dtype=int) + for j, bbox in enumerate(cls_gts): + if area_ranges is None: + num_gts[0] += np.sum(np.logical_not(cls_gt_ignore[j])) + else: + gt_areas = (bbox[:, 2] - bbox[:, 0] + 1) * ( + bbox[:, 3] - bbox[:, 1] + 1) + for k, (min_area, max_area) in enumerate(area_ranges): + num_gts[k] += np.sum( + np.logical_not(cls_gt_ignore[j]) & + (gt_areas >= min_area) & (gt_areas < max_area)) + # sort all det bboxes by score, also sort tp and fp + cls_dets = np.vstack(cls_dets) + num_dets = cls_dets.shape[0] + sort_inds = np.argsort(-cls_dets[:, -1]) + tp = np.hstack(tp)[:, sort_inds] + fp = np.hstack(fp)[:, sort_inds] + # calculate recall and precision with tp and fp + tp = np.cumsum(tp, axis=1) + fp = np.cumsum(fp, axis=1) + eps = np.finfo(np.float32).eps + recalls = tp / np.maximum(num_gts[:, np.newaxis], eps) + precisions = tp / np.maximum((tp + fp), eps) + # calculate AP + if scale_ranges is None: + recalls = recalls[0, :] + precisions = precisions[0, :] + num_gts = num_gts.item() + mode = 'area' if dataset != 'voc07' else '11points' + ap = average_precision(recalls, precisions, mode) + eval_results.append({ + 'num_gts': num_gts, + 'num_dets': num_dets, + 'recall': recalls, + 'precision': precisions, + 'ap': ap + }) + if scale_ranges is not None: + # shape (num_classes, num_scales) + all_ap = np.vstack([cls_result['ap'] for cls_result in eval_results]) + all_num_gts = np.vstack( + [cls_result['num_gts'] for cls_result in eval_results]) + mean_ap = [ + all_ap[all_num_gts[:, i] > 0, i].mean() + if np.any(all_num_gts[:, i] > 0) else 0.0 + for i in range(num_scales) + ] + else: + aps = [] + for cls_result in eval_results: + if cls_result['num_gts'] > 0: + aps.append(cls_result['ap']) + mean_ap = np.array(aps).mean().item() if aps else 0.0 + if print_summary: + print_map_summary(mean_ap, eval_results, dataset) + + return mean_ap, eval_results + + +def print_map_summary(mean_ap, results, dataset=None): + """Print mAP and results of each class. + + Args: + mean_ap(float): calculated from `eval_map` + results(list): calculated from `eval_map` + dataset(None or str or list): dataset name or dataset classes. 
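+
+    One AsciiTable is printed per scale range, with one row per class and a
+    final row holding the mAP.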
+ """ + num_scales = len(results[0]['ap']) if isinstance(results[0]['ap'], + np.ndarray) else 1 + num_classes = len(results) + + recalls = np.zeros((num_scales, num_classes), dtype=np.float32) + precisions = np.zeros((num_scales, num_classes), dtype=np.float32) + aps = np.zeros((num_scales, num_classes), dtype=np.float32) + num_gts = np.zeros((num_scales, num_classes), dtype=int) + for i, cls_result in enumerate(results): + if cls_result['recall'].size > 0: + recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1] + precisions[:, i] = np.array( + cls_result['precision'], ndmin=2)[:, -1] + aps[:, i] = cls_result['ap'] + num_gts[:, i] = cls_result['num_gts'] + + if dataset is None: + label_names = [str(i) for i in range(1, num_classes + 1)] + elif mmcv.is_str(dataset): + label_names = get_classes(dataset) + else: + label_names = dataset + + if not isinstance(mean_ap, list): + mean_ap = [mean_ap] + header = ['class', 'gts', 'dets', 'recall', 'precision', 'ap'] + for i in range(num_scales): + table_data = [header] + for j in range(num_classes): + row_data = [ + label_names[j], num_gts[i, j], results[j]['num_dets'], + '{:.3f}'.format(recalls[i, j]), '{:.3f}'.format( + precisions[i, j]), '{:.3f}'.format(aps[i, j]) + ] + table_data.append(row_data) + table_data.append(['mAP', '', '', '', '', '{:.3f}'.format(mean_ap[i])]) + table = AsciiTable(table_data) + table.inner_footing_row_border = True + print(table.table) diff --git a/mmdet/core/evaluation/recall.py b/mmdet/core/evaluation/recall.py new file mode 100644 index 0000000..2a56f42 --- /dev/null +++ b/mmdet/core/evaluation/recall.py @@ -0,0 +1,185 @@ +import numpy as np +from terminaltables import AsciiTable + +from .bbox_overlaps import bbox_overlaps + + +def _recalls(all_ious, proposal_nums, thrs): + + img_num = all_ious.shape[0] + total_gt_num = sum([ious.shape[0] for ious in all_ious]) + + _ious = np.zeros((proposal_nums.size, total_gt_num), dtype=np.float32) + for k, proposal_num in enumerate(proposal_nums): + tmp_ious = np.zeros(0) + for i in range(img_num): + ious = all_ious[i][:, :proposal_num].copy() + gt_ious = np.zeros((ious.shape[0])) + if ious.size == 0: + tmp_ious = np.hstack((tmp_ious, gt_ious)) + continue + for j in range(ious.shape[0]): + gt_max_overlaps = ious.argmax(axis=1) + max_ious = ious[np.arange(0, ious.shape[0]), gt_max_overlaps] + gt_idx = max_ious.argmax() + gt_ious[j] = max_ious[gt_idx] + box_idx = gt_max_overlaps[gt_idx] + ious[gt_idx, :] = -1 + ious[:, box_idx] = -1 + tmp_ious = np.hstack((tmp_ious, gt_ious)) + _ious[k, :] = tmp_ious + + _ious = np.fliplr(np.sort(_ious, axis=1)) + recalls = np.zeros((proposal_nums.size, thrs.size)) + for i, thr in enumerate(thrs): + recalls[:, i] = (_ious >= thr).sum(axis=1) / float(total_gt_num) + + return recalls + + +def set_recall_param(proposal_nums, iou_thrs): + """Check proposal_nums and iou_thrs and set correct format. + """ + if isinstance(proposal_nums, list): + _proposal_nums = np.array(proposal_nums) + elif isinstance(proposal_nums, int): + _proposal_nums = np.array([proposal_nums]) + else: + _proposal_nums = proposal_nums + + if iou_thrs is None: + _iou_thrs = np.array([0.5]) + elif isinstance(iou_thrs, list): + _iou_thrs = np.array(iou_thrs) + elif isinstance(iou_thrs, float): + _iou_thrs = np.array([iou_thrs]) + else: + _iou_thrs = iou_thrs + + return _proposal_nums, _iou_thrs + + +def eval_recalls(gts, + proposals, + proposal_nums=None, + iou_thrs=None, + print_summary=True): + """Calculate recalls. 
+ + Args: + gts(list or ndarray): a list of arrays of shape (n, 4) + proposals(list or ndarray): a list of arrays of shape (k, 4) or (k, 5) + proposal_nums(int or list of int or ndarray): top N proposals + thrs(float or list or ndarray): iou thresholds + + Returns: + ndarray: recalls of different ious and proposal nums + """ + + img_num = len(gts) + assert img_num == len(proposals) + + proposal_nums, iou_thrs = set_recall_param(proposal_nums, iou_thrs) + + all_ious = [] + for i in range(img_num): + if proposals[i].ndim == 2 and proposals[i].shape[1] == 5: + scores = proposals[i][:, 4] + sort_idx = np.argsort(scores)[::-1] + img_proposal = proposals[i][sort_idx, :] + else: + img_proposal = proposals[i] + prop_num = min(img_proposal.shape[0], proposal_nums[-1]) + if gts[i] is None or gts[i].shape[0] == 0: + ious = np.zeros((0, img_proposal.shape[0]), dtype=np.float32) + else: + ious = bbox_overlaps(gts[i], img_proposal[:prop_num, :4]) + all_ious.append(ious) + all_ious = np.array(all_ious) + recalls = _recalls(all_ious, proposal_nums, iou_thrs) + if print_summary: + print_recall_summary(recalls, proposal_nums, iou_thrs) + return recalls + + +def print_recall_summary(recalls, + proposal_nums, + iou_thrs, + row_idxs=None, + col_idxs=None): + """Print recalls in a table. + + Args: + recalls(ndarray): calculated from `bbox_recalls` + proposal_nums(ndarray or list): top N proposals + iou_thrs(ndarray or list): iou thresholds + row_idxs(ndarray): which rows(proposal nums) to print + col_idxs(ndarray): which cols(iou thresholds) to print + """ + proposal_nums = np.array(proposal_nums, dtype=np.int32) + iou_thrs = np.array(iou_thrs) + if row_idxs is None: + row_idxs = np.arange(proposal_nums.size) + if col_idxs is None: + col_idxs = np.arange(iou_thrs.size) + row_header = [''] + iou_thrs[col_idxs].tolist() + table_data = [row_header] + for i, num in enumerate(proposal_nums[row_idxs]): + row = [ + '{:.3f}'.format(val) + for val in recalls[row_idxs[i], col_idxs].tolist() + ] + row.insert(0, num) + table_data.append(row) + table = AsciiTable(table_data) + print(table.table) + + +def plot_num_recall(recalls, proposal_nums): + """Plot Proposal_num-Recalls curve. + + Args: + recalls(ndarray or list): shape (k,) + proposal_nums(ndarray or list): same shape as `recalls` + """ + if isinstance(proposal_nums, np.ndarray): + _proposal_nums = proposal_nums.tolist() + else: + _proposal_nums = proposal_nums + if isinstance(recalls, np.ndarray): + _recalls = recalls.tolist() + else: + _recalls = recalls + + import matplotlib.pyplot as plt + f = plt.figure() + plt.plot([0] + _proposal_nums, [0] + _recalls) + plt.xlabel('Proposal num') + plt.ylabel('Recall') + plt.axis([0, proposal_nums.max(), 0, 1]) + f.show() + + +def plot_iou_recall(recalls, iou_thrs): + """Plot IoU-Recalls curve. 
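+
+    `recalls` is typically one row of the array returned by eval_recalls,
+    i.e. the recalls of a fixed proposal number over varying IoU thresholds.
+    matplotlib is imported lazily, so it is only needed when plotting.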
+ + Args: + recalls(ndarray or list): shape (k,) + iou_thrs(ndarray or list): same shape as `recalls` + """ + if isinstance(iou_thrs, np.ndarray): + _iou_thrs = iou_thrs.tolist() + else: + _iou_thrs = iou_thrs + if isinstance(recalls, np.ndarray): + _recalls = recalls.tolist() + else: + _recalls = recalls + + import matplotlib.pyplot as plt + f = plt.figure() + plt.plot(_iou_thrs + [1.0], _recalls + [0.]) + plt.xlabel('IoU') + plt.ylabel('Recall') + plt.axis([iou_thrs.min(), 1, 0, 1]) + f.show() diff --git a/mmdet/core/loss/__init__.py b/mmdet/core/loss/__init__.py new file mode 100644 index 0000000..8880518 --- /dev/null +++ b/mmdet/core/loss/__init__.py @@ -0,0 +1,12 @@ +from .losses import ( + weighted_nll_loss, weighted_cross_entropy, weighted_binary_cross_entropy, + sigmoid_focal_loss, py_sigmoid_focal_loss, weighted_sigmoid_focal_loss, + mask_cross_entropy, smooth_l1_loss, weighted_smoothl1, accuracy, iou_loss) + +__all__ = [ + 'weighted_nll_loss', 'weighted_cross_entropy', + 'weighted_binary_cross_entropy', 'sigmoid_focal_loss', + 'py_sigmoid_focal_loss', 'weighted_sigmoid_focal_loss', + 'mask_cross_entropy', 'smooth_l1_loss', 'weighted_smoothl1', 'accuracy', + 'iou_loss' +] diff --git a/mmdet/core/loss/losses.py b/mmdet/core/loss/losses.py new file mode 100644 index 0000000..e541ec4 --- /dev/null +++ b/mmdet/core/loss/losses.py @@ -0,0 +1,143 @@ +# TODO merge naive and weighted loss. +import torch +import torch.nn.functional as F + +from ..bbox import bbox_overlaps +from ...ops import sigmoid_focal_loss + + +def weighted_nll_loss(pred, label, weight, avg_factor=None): + if avg_factor is None: + avg_factor = max(torch.sum(weight > 0).float().item(), 1.) + raw = F.nll_loss(pred, label, reduction='none') + return torch.sum(raw * weight)[None] / avg_factor + + +def weighted_cross_entropy(pred, label, weight, avg_factor=None, reduce=True): + if avg_factor is None: + avg_factor = max(torch.sum(weight > 0).float().item(), 1.) + raw = F.cross_entropy(pred, label, reduction='none') + if reduce: + return torch.sum(raw * weight)[None] / avg_factor + else: + return raw * weight / avg_factor + + +def weighted_binary_cross_entropy(pred, label, weight, avg_factor=None): + if pred.dim() != label.dim(): + label, weight = _expand_binary_labels(label, weight, pred.size(-1)) + if avg_factor is None: + avg_factor = max(torch.sum(weight > 0).float().item(), 1.) 
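+    # By default, normalize by the number of samples with a positive weight
+    # (clamped to at least 1 to avoid division by zero).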
+ return F.binary_cross_entropy_with_logits( + pred, label.float(), weight.float(), + reduction='sum')[None] / avg_factor + + +def py_sigmoid_focal_loss(pred, + target, + weight, + gamma=2.0, + alpha=0.25, + reduction='mean'): + pred_sigmoid = pred.sigmoid() + target = target.type_as(pred) + pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target) + weight = (alpha * target + (1 - alpha) * (1 - target)) * weight + weight = weight * pt.pow(gamma) + loss = F.binary_cross_entropy_with_logits( + pred, target, reduction='none') * weight + reduction_enum = F._Reduction.get_enum(reduction) + # none: 0, mean:1, sum: 2 + if reduction_enum == 0: + return loss + elif reduction_enum == 1: + return loss.mean() + elif reduction_enum == 2: + return loss.sum() + + +def weighted_sigmoid_focal_loss(pred, + target, + weight, + gamma=2.0, + alpha=0.25, + avg_factor=None, + num_classes=80): + if avg_factor is None: + avg_factor = torch.sum(weight > 0).float().item() / num_classes + 1e-6 + return torch.sum( + sigmoid_focal_loss(pred, target, gamma, alpha, 'none') * weight.view( + -1, 1))[None] / avg_factor + + +def mask_cross_entropy(pred, target, label): + num_rois = pred.size()[0] + inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device) + pred_slice = pred[inds, label].squeeze(1) + return F.binary_cross_entropy_with_logits( + pred_slice, target, reduction='mean')[None] + + +def smooth_l1_loss(pred, target, beta=1.0, reduction='mean'): + assert beta > 0 + assert pred.size() == target.size() and target.numel() > 0 + diff = torch.abs(pred - target) + loss = torch.where(diff < beta, 0.5 * diff * diff / beta, + diff - 0.5 * beta) + reduction_enum = F._Reduction.get_enum(reduction) + # none: 0, mean:1, sum: 2 + if reduction_enum == 0: + return loss + elif reduction_enum == 1: + return loss.sum() / pred.numel() + elif reduction_enum == 2: + return loss.sum() + + +def weighted_smoothl1(pred, target, weight, beta=1.0, avg_factor=None): + if avg_factor is None: + avg_factor = torch.sum(weight > 0).float().item() / 4 + 1e-6 + loss = smooth_l1_loss(pred, target, beta, reduction='none') + return torch.sum(loss * weight)[None] / avg_factor + + +def accuracy(pred, target, topk=1): + if isinstance(topk, int): + topk = (topk, ) + return_single = True + else: + return_single = False + + maxk = max(topk) + _, pred_label = pred.topk(maxk, 1, True, True) + pred_label = pred_label.t() + correct = pred_label.eq(target.view(1, -1).expand_as(pred_label)) + + res = [] + for k in topk: + correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) + res.append(correct_k.mul_(100.0 / pred.size(0))) + return res[0] if return_single else res + + +def _expand_binary_labels(labels, label_weights, label_channels): + bin_labels = labels.new_full((labels.size(0), label_channels), 0) + inds = torch.nonzero(labels >= 1).squeeze() + if inds.numel() > 0: + bin_labels[inds, labels[inds] - 1] = 1 + bin_label_weights = label_weights.view(-1, 1).expand( + label_weights.size(0), label_channels) + return bin_labels, bin_label_weights + + +def iou_loss(pred_bboxes, target_bboxes, reduction='mean'): + ious = bbox_overlaps(pred_bboxes, target_bboxes, is_aligned=True) + loss = -ious.log() + + reduction_enum = F._Reduction.get_enum(reduction) + if reduction_enum == 0: + return loss + elif reduction_enum == 1: + return loss.mean() + elif reduction_enum == 2: + return loss.sum() diff --git a/mmdet/core/mask/__init__.py b/mmdet/core/mask/__init__.py new file mode 100644 index 0000000..ce85fee --- /dev/null +++ 
b/mmdet/core/mask/__init__.py @@ -0,0 +1,4 @@ +from .utils import split_combined_polys +from .mask_target import mask_target +from .grid_target import random_jitter,grid_target +__all__ = ['split_combined_polys', 'mask_target', 'grid_target', 'random_jitter'] diff --git a/mmdet/core/mask/grid_target.py b/mmdet/core/mask/grid_target.py new file mode 100644 index 0000000..ed0b182 --- /dev/null +++ b/mmdet/core/mask/grid_target.py @@ -0,0 +1,82 @@ +import torch +import numpy as np +import mmcv + +def reduce_vision(x,mask_size): + quater_size = mask_size // 4 + base = ((0,quater_size*2),(quater_size,quater_size*3),(quater_size*2,quater_size*4)) + layers = [x[:,i:i+1][:,:,base[i%3][0]:base[i%3][1],base[i//3][0]:base[i//3][1]] for i in range(9)] + layers = torch.cat(layers,dim=1) + return layers + +def grid_target(sampling_results,cfg): + #We don't care about image_idx and mix all samples(across images) together. + pos_bboxes = torch.cat([res.pos_bboxes for res in sampling_results],dim=0) + pos_gt_bboxes = torch.cat([res.pos_gt_bboxes for res in sampling_results],dim=0) + assert(pos_bboxes.shape == pos_gt_bboxes.shape) + + #expand pos_bboxes + x1 = pos_bboxes[:,0] - (pos_bboxes[:,2] - pos_bboxes[:,0]) / 2 + y1 = pos_bboxes[:,1] - (pos_bboxes[:,3] - pos_bboxes[:,1]) / 2 + x2 = pos_bboxes[:,2] + (pos_bboxes[:,2] - pos_bboxes[:,0]) / 2 + y2 = pos_bboxes[:,3] + (pos_bboxes[:,3] - pos_bboxes[:,1]) / 2 + + pos_bboxes = torch.cat(list(map(lambda x:x.view(-1,1),[x1,y1,x2,y2])),dim=1) + + R = pos_bboxes.shape[0] + G = cfg.num_grids + mask_size = cfg.mask_size + targets = torch.zeros([R,G,mask_size,mask_size]) + + for rix in range(R): + for gix in range(G): + gridpoint_x = (1-gix//3/2)*(pos_gt_bboxes[rix,0]) + (gix//3/2)*(pos_gt_bboxes[rix,2]) + gridpoint_y = (1-gix%3/2)*(pos_gt_bboxes[rix,1]) + (gix%3/2)*(pos_gt_bboxes[rix,3]) + if (pos_bboxes[rix,2] - pos_bboxes[rix,0]) < 4 or (pos_bboxes[rix,3] - pos_bboxes[rix,1]) < 4: + continue + cx = int((gridpoint_x - pos_bboxes[rix,0])/(pos_bboxes[rix,2] - pos_bboxes[rix,0])*mask_size) + cy = int((gridpoint_y - pos_bboxes[rix,1])/(pos_bboxes[rix,3] - pos_bboxes[rix,1])*mask_size) + radius = cfg.get('radius',1) + for x in range(cx - radius,cx + radius + 1): + for y in range(cy - radius, cy + radius + 1): + if x >= 0 and x < mask_size and y>=0 and y < mask_size and (x-cx)**2+(y-cy)**2<=radius**2: + targets[rix,gix,y,x] = 1 + targets = reduce_vision(targets,mask_size) + targets = targets.float().cuda() + return targets + +### radom jittering +def random_jitter_single(sampling_results,img_meta,amplitude=0.15): + rois = sampling_results.pos_bboxes.cpu() + R,K = rois.shape + random_offset = torch.FloatTensor(R,4).uniform_(-amplitude,amplitude) + #before jittering + ctx_ = (rois[:,2] + rois[:,0])/2 + cty_ = (rois[:,1] + rois[:,3])/2 + width_ = (rois[:,2] - rois[:,0]).abs() + height_ = (rois[:,3] - rois[:,1]).abs() + #after jittering + ctx = ctx_ + random_offset[:,0] * width_ + cty = cty_ + random_offset[:,1] * height_ + width = width_ * (1 + random_offset[:,2]) + height = height_ * (1 + random_offset[:,3]) + + x1 = (ctx - width/2).view(-1,1) + y1 = (cty - height/2).view(-1,1) + x2 = (ctx + width/2).view(-1,1) + y2 = (cty + height/2).view(-1,1) + + max_shape = img_meta['img_shape'] + if max_shape is not None: + x1 = x1.clamp(min=0, max=max_shape[1] - 1) + y1 = y1.clamp(min=0, max=max_shape[0] - 1) + x2 = x2.clamp(min=0, max=max_shape[1] - 1) + y2 = y2.clamp(min=0, max=max_shape[0] - 1) + + rois = torch.cat([x1,y1,x2,y2],dim=1).cuda() + sampling_results.pos_bboxes = rois 
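+    # The jittered boxes are clipped to the image and written back, so later
+    # consumers such as grid_target() above operate on the perturbed RoIs.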
+ return sampling_results + +def random_jitter(sampling_results,img_metas): + post_sampling_results = map(random_jitter_single,sampling_results,img_metas) + return list(post_sampling_results) diff --git a/mmdet/core/mask/mask_target.py b/mmdet/core/mask/mask_target.py new file mode 100644 index 0000000..be93dfc --- /dev/null +++ b/mmdet/core/mask/mask_target.py @@ -0,0 +1,36 @@ +import torch +import numpy as np +import mmcv + + +def mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list, + cfg): + cfg_list = [cfg for _ in range(len(pos_proposals_list))] + mask_targets = map(mask_target_single, pos_proposals_list, + pos_assigned_gt_inds_list, gt_masks_list, cfg_list) + mask_targets = torch.cat(list(mask_targets)) + return mask_targets + + +def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg): + mask_size = cfg.mask_size + num_pos = pos_proposals.size(0) + mask_targets = [] + if num_pos > 0: + proposals_np = pos_proposals.cpu().numpy() + pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy() + for i in range(num_pos): + gt_mask = gt_masks[pos_assigned_gt_inds[i]] + bbox = proposals_np[i, :].astype(np.int32) + x1, y1, x2, y2 = bbox + w = np.maximum(x2 - x1 + 1, 1) + h = np.maximum(y2 - y1 + 1, 1) + # mask is uint8 both before and after resizing + target = mmcv.imresize(gt_mask[y1:y1 + h, x1:x1 + w], + (mask_size, mask_size)) + mask_targets.append(target) + mask_targets = torch.from_numpy(np.stack(mask_targets)).float().to( + pos_proposals.device) + else: + mask_targets = pos_proposals.new_zeros((0, mask_size, mask_size)) + return mask_targets diff --git a/mmdet/core/mask/utils.py b/mmdet/core/mask/utils.py new file mode 100644 index 0000000..a68312b --- /dev/null +++ b/mmdet/core/mask/utils.py @@ -0,0 +1,30 @@ +import mmcv + + +def split_combined_polys(polys, poly_lens, polys_per_mask): + """Split the combined 1-D polys into masks. + + A mask is represented as a list of polys, and a poly is represented as + a 1-D array. In dataset, all masks are concatenated into a single 1-D + tensor. Here we need to split the tensor into original representations. 
+ + Args: + polys (list): a list (length = image num) of 1-D tensors + poly_lens (list): a list (length = image num) of poly length + polys_per_mask (list): a list (length = image num) of poly number + of each mask + + Returns: + list: a list (length = image num) of list (length = mask num) of + list (length = poly num) of numpy array + """ + mask_polys_list = [] + for img_id in range(len(polys)): + polys_single = polys[img_id] + polys_lens_single = poly_lens[img_id].tolist() + polys_per_mask_single = polys_per_mask[img_id].tolist() + + split_polys = mmcv.slice_list(polys_single, polys_lens_single) + mask_polys = mmcv.slice_list(split_polys, polys_per_mask_single) + mask_polys_list.append(mask_polys) + return mask_polys_list diff --git a/mmdet/core/post_processing/__init__.py b/mmdet/core/post_processing/__init__.py new file mode 100644 index 0000000..1b24a3f --- /dev/null +++ b/mmdet/core/post_processing/__init__.py @@ -0,0 +1,8 @@ +from .bbox_nms import multiclass_nms +from .merge_augs import (merge_aug_proposals, merge_aug_bboxes, + merge_aug_scores, merge_aug_masks) + +__all__ = [ + 'multiclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes', + 'merge_aug_scores', 'merge_aug_masks' +] diff --git a/mmdet/core/post_processing/bbox_nms.py b/mmdet/core/post_processing/bbox_nms.py new file mode 100644 index 0000000..cb3fe21 --- /dev/null +++ b/mmdet/core/post_processing/bbox_nms.py @@ -0,0 +1,64 @@ +import torch + +from mmdet.ops.nms import nms_wrapper + + +def multiclass_nms(multi_bboxes, + multi_scores, + score_thr, + nms_cfg, + max_num=-1, + score_factors=None): + """NMS for multi-class bboxes. + + Args: + multi_bboxes (Tensor): shape (n, #class*4) or (n, 4) + multi_scores (Tensor): shape (n, #class) + score_thr (float): bbox threshold, bboxes with scores lower than it + will not be considered. + nms_thr (float): NMS IoU threshold + max_num (int): if there are more than max_num bboxes after NMS, + only top max_num will be kept. + score_factors (Tensor): The factors multiplied to scores before + applying NMS + + Returns: + tuple: (bboxes, labels), tensors of shape (k, 5) and (k, 1). Labels + are 0-based. 
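+
+    Example (illustrative only; assumes the compiled nms ops are built):
+        >>> import torch
+        >>> bboxes = torch.rand(10, 4) * 100   # shared boxes for all classes
+        >>> scores = torch.rand(10, 2)          # column 0 is the background
+        >>> dets, labels = multiclass_nms(
+        ...     bboxes, scores, score_thr=0.05,
+        ...     nms_cfg=dict(type='nms', iou_thr=0.5), max_num=100)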
+ """ + num_classes = multi_scores.shape[1] + bboxes, labels = [], [] + nms_cfg_ = nms_cfg.copy() + nms_type = nms_cfg_.pop('type', 'nms') + nms_op = getattr(nms_wrapper, nms_type) + for i in range(1, num_classes): + cls_inds = multi_scores[:, i] > score_thr + if not cls_inds.any(): + continue + # get bboxes and scores of this class + if multi_bboxes.shape[1] == 4: + _bboxes = multi_bboxes[cls_inds, :] + else: + _bboxes = multi_bboxes[cls_inds, i * 4:(i + 1) * 4] + _scores = multi_scores[cls_inds, i] + if score_factors is not None: + _scores *= score_factors[cls_inds] + cls_dets = torch.cat([_bboxes, _scores[:, None]], dim=1) + cls_dets, _ = nms_op(cls_dets, **nms_cfg_) + cls_labels = multi_bboxes.new_full( + (cls_dets.shape[0], ), i - 1, dtype=torch.long) + bboxes.append(cls_dets) + labels.append(cls_labels) + if bboxes: + bboxes = torch.cat(bboxes) + labels = torch.cat(labels) + if bboxes.shape[0] > max_num: + _, inds = bboxes[:, -1].sort(descending=True) + inds = inds[:max_num] + bboxes = bboxes[inds] + labels = labels[inds] + else: + bboxes = multi_bboxes.new_zeros((0, 5)) + labels = multi_bboxes.new_zeros((0, ), dtype=torch.long) + + return bboxes, labels diff --git a/mmdet/core/post_processing/merge_augs.py b/mmdet/core/post_processing/merge_augs.py new file mode 100644 index 0000000..f97954b --- /dev/null +++ b/mmdet/core/post_processing/merge_augs.py @@ -0,0 +1,96 @@ +import torch + +import numpy as np + +from mmdet.ops import nms +from ..bbox import bbox_mapping_back + + +def merge_aug_proposals(aug_proposals, img_metas, rpn_test_cfg): + """Merge augmented proposals (multiscale, flip, etc.) + + Args: + aug_proposals (list[Tensor]): proposals from different testing + schemes, shape (n, 5). Note that they are not rescaled to the + original image size. + img_metas (list[dict]): image info including "shape_scale" and "flip". + rpn_test_cfg (dict): rpn test config. + + Returns: + Tensor: shape (n, 4), proposals corresponding to original image scale. + """ + recovered_proposals = [] + for proposals, img_info in zip(aug_proposals, img_metas): + img_shape = img_info['img_shape'] + scale_factor = img_info['scale_factor'] + flip = img_info['flip'] + _proposals = proposals.clone() + _proposals[:, :4] = bbox_mapping_back(_proposals[:, :4], img_shape, + scale_factor, flip) + recovered_proposals.append(_proposals) + aug_proposals = torch.cat(recovered_proposals, dim=0) + merged_proposals, _ = nms(aug_proposals, rpn_test_cfg.nms_thr) + scores = merged_proposals[:, 4] + _, order = scores.sort(0, descending=True) + num = min(rpn_test_cfg.max_num, merged_proposals.shape[0]) + order = order[:num] + merged_proposals = merged_proposals[order, :] + return merged_proposals + + +def merge_aug_bboxes(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg): + """Merge augmented detection bboxes and scores. + + Args: + aug_bboxes (list[Tensor]): shape (n, 4*#class) + aug_scores (list[Tensor] or None): shape (n, #class) + img_shapes (list[Tensor]): shape (3, ). + rcnn_test_cfg (dict): rcnn test config. 
+ + Returns: + tuple: (bboxes, scores) + """ + recovered_bboxes = [] + for bboxes, img_info in zip(aug_bboxes, img_metas): + img_shape = img_info[0]['img_shape'] + scale_factor = img_info[0]['scale_factor'] + flip = img_info[0]['flip'] + bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip) + recovered_bboxes.append(bboxes) + bboxes = torch.stack(recovered_bboxes).mean(dim=0) + if aug_scores is None: + return bboxes + else: + scores = torch.stack(aug_scores).mean(dim=0) + return bboxes, scores + + +def merge_aug_scores(aug_scores): + """Merge augmented bbox scores.""" + if isinstance(aug_scores[0], torch.Tensor): + return torch.mean(torch.stack(aug_scores), dim=0) + else: + return np.mean(aug_scores, axis=0) + + +def merge_aug_masks(aug_masks, img_metas, rcnn_test_cfg, weights=None): + """Merge augmented mask prediction. + + Args: + aug_masks (list[ndarray]): shape (n, #class, h, w) + img_shapes (list[ndarray]): shape (3, ). + rcnn_test_cfg (dict): rcnn test config. + + Returns: + tuple: (bboxes, scores) + """ + recovered_masks = [ + mask if not img_info[0]['flip'] else mask[..., ::-1] + for mask, img_info in zip(aug_masks, img_metas) + ] + if weights is None: + merged_masks = np.mean(recovered_masks, axis=0) + else: + merged_masks = np.average( + np.array(recovered_masks), axis=0, weights=np.array(weights)) + return merged_masks diff --git a/mmdet/core/utils/__init__.py b/mmdet/core/utils/__init__.py new file mode 100644 index 0000000..89e952e --- /dev/null +++ b/mmdet/core/utils/__init__.py @@ -0,0 +1,7 @@ +from .dist_utils import allreduce_grads, DistOptimizerHook +from .misc import tensor2imgs, unmap, multi_apply + +__all__ = [ + 'allreduce_grads', 'DistOptimizerHook', 'tensor2imgs', 'unmap', + 'multi_apply' +] diff --git a/mmdet/core/utils/dist_utils.py b/mmdet/core/utils/dist_utils.py new file mode 100644 index 0000000..ec84bb4 --- /dev/null +++ b/mmdet/core/utils/dist_utils.py @@ -0,0 +1,57 @@ +from collections import OrderedDict + +import torch.distributed as dist +from torch._utils import (_flatten_dense_tensors, _unflatten_dense_tensors, + _take_tensors) +from mmcv.runner import OptimizerHook + + +def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1): + if bucket_size_mb > 0: + bucket_size_bytes = bucket_size_mb * 1024 * 1024 + buckets = _take_tensors(tensors, bucket_size_bytes) + else: + buckets = OrderedDict() + for tensor in tensors: + tp = tensor.type() + if tp not in buckets: + buckets[tp] = [] + buckets[tp].append(tensor) + buckets = buckets.values() + + for bucket in buckets: + flat_tensors = _flatten_dense_tensors(bucket) + dist.all_reduce(flat_tensors) + flat_tensors.div_(world_size) + for tensor, synced in zip( + bucket, _unflatten_dense_tensors(flat_tensors, bucket)): + tensor.copy_(synced) + + +def allreduce_grads(model, coalesce=True, bucket_size_mb=-1): + grads = [ + param.grad.data for param in model.parameters() + if param.requires_grad and param.grad is not None + ] + world_size = dist.get_world_size() + if coalesce: + _allreduce_coalesced(grads, world_size, bucket_size_mb) + else: + for tensor in grads: + dist.all_reduce(tensor.div_(world_size)) + + +class DistOptimizerHook(OptimizerHook): + + def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=-1): + self.grad_clip = grad_clip + self.coalesce = coalesce + self.bucket_size_mb = bucket_size_mb + + def after_train_iter(self, runner): + runner.optimizer.zero_grad() + runner.outputs['loss'].backward() + allreduce_grads(runner.model, self.coalesce, self.bucket_size_mb) + if 
self.grad_clip is not None: + self.clip_grads(runner.model.parameters()) + runner.optimizer.step() diff --git a/mmdet/core/utils/misc.py b/mmdet/core/utils/misc.py new file mode 100644 index 0000000..262f168 --- /dev/null +++ b/mmdet/core/utils/misc.py @@ -0,0 +1,37 @@ +from functools import partial + +import mmcv +import numpy as np +from six.moves import map, zip + + +def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True): + num_imgs = tensor.size(0) + mean = np.array(mean, dtype=np.float32) + std = np.array(std, dtype=np.float32) + imgs = [] + for img_id in range(num_imgs): + img = tensor[img_id, ...].cpu().numpy().transpose(1, 2, 0) + img = mmcv.imdenormalize( + img, mean, std, to_bgr=to_rgb).astype(np.uint8) + imgs.append(np.ascontiguousarray(img)) + return imgs + + +def multi_apply(func, *args, **kwargs): + pfunc = partial(func, **kwargs) if kwargs else func + map_results = map(pfunc, *args) + return tuple(map(list, zip(*map_results))) + + +def unmap(data, count, inds, fill=0): + """ Unmap a subset of item (data) back to the original set of items (of + size count) """ + if data.dim() == 1: + ret = data.new_full((count, ), fill) + ret[inds] = data + else: + new_size = (count, ) + data.size()[1:] + ret = data.new_full(new_size, fill) + ret[inds, :] = data + return ret diff --git a/mmdet/datasets/__init__.py b/mmdet/datasets/__init__.py new file mode 100644 index 0000000..572b0fa --- /dev/null +++ b/mmdet/datasets/__init__.py @@ -0,0 +1,16 @@ +from .custom import CustomDataset +from .xml_style import XMLDataset +from .coco import CocoDataset +from .voc import VOCDataset +from .loader import GroupSampler, DistributedGroupSampler, build_dataloader +from .utils import to_tensor, random_scale, show_ann, get_dataset +from .concat_dataset import ConcatDataset +from .repeat_dataset import RepeatDataset +from .extra_aug import ExtraAugmentation + +__all__ = [ + 'CustomDataset', 'XMLDataset', 'CocoDataset', 'VOCDataset', 'GroupSampler', + 'DistributedGroupSampler', 'build_dataloader', 'to_tensor', 'random_scale', + 'show_ann', 'get_dataset', 'ConcatDataset', 'RepeatDataset', + 'ExtraAugmentation' +] diff --git a/mmdet/datasets/coco.py b/mmdet/datasets/coco.py new file mode 100644 index 0000000..0b3af9b --- /dev/null +++ b/mmdet/datasets/coco.py @@ -0,0 +1,118 @@ +import numpy as np +from pycocotools.coco import COCO + +from .custom import CustomDataset + + +class CocoDataset(CustomDataset): + + CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', + 'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant', + 'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog', + 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', + 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', + 'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat', + 'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket', + 'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', + 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', + 'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch', + 'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop', + 'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave', + 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', + 'vase', 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush') + + def load_annotations(self, ann_file): + self.coco = COCO(ann_file) + self.cat_ids = self.coco.getCatIds() + self.cat2label = { + cat_id: i + 1 + for i, cat_id in enumerate(self.cat_ids) + } + self.img_ids = 
self.coco.getImgIds() + img_infos = [] + for i in self.img_ids: + info = self.coco.loadImgs([i])[0] + info['filename'] = info['file_name'] + img_infos.append(info) + return img_infos + + def get_ann_info(self, idx): + img_id = self.img_infos[idx]['id'] + ann_ids = self.coco.getAnnIds(imgIds=[img_id]) + ann_info = self.coco.loadAnns(ann_ids) + return self._parse_ann_info(ann_info, self.with_mask) + + def _filter_imgs(self, min_size=32): + """Filter images too small or without ground truths.""" + valid_inds = [] + ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values()) + for i, img_info in enumerate(self.img_infos): + if self.img_ids[i] not in ids_with_ann: + continue + if min(img_info['width'], img_info['height']) >= min_size: + valid_inds.append(i) + return valid_inds + + def _parse_ann_info(self, ann_info, with_mask=True): + """Parse bbox and mask annotation. + + Args: + ann_info (list[dict]): Annotation info of an image. + with_mask (bool): Whether to parse mask annotations. + + Returns: + dict: A dict containing the following keys: bboxes, bboxes_ignore, + labels, masks, mask_polys, poly_lens. + """ + gt_bboxes = [] + gt_labels = [] + gt_bboxes_ignore = [] + # Two formats are provided. + # 1. mask: a binary map of the same size of the image. + # 2. polys: each mask consists of one or several polys, each poly is a + # list of float. + if with_mask: + gt_masks = [] + gt_mask_polys = [] + gt_poly_lens = [] + for i, ann in enumerate(ann_info): + if ann.get('ignore', False): + continue + x1, y1, w, h = ann['bbox'] + if ann['area'] <= 0 or w < 1 or h < 1: + continue + bbox = [x1, y1, x1 + w - 1, y1 + h - 1] + if ann['iscrowd']: + gt_bboxes_ignore.append(bbox) + else: + gt_bboxes.append(bbox) + gt_labels.append(self.cat2label[ann['category_id']]) + if with_mask: + gt_masks.append(self.coco.annToMask(ann)) + mask_polys = [ + p for p in ann['segmentation'] if len(p) >= 6 + ] # valid polygons have >= 3 points (6 coordinates) + poly_lens = [len(p) for p in mask_polys] + gt_mask_polys.append(mask_polys) + gt_poly_lens.extend(poly_lens) + if gt_bboxes: + gt_bboxes = np.array(gt_bboxes, dtype=np.float32) + gt_labels = np.array(gt_labels, dtype=np.int64) + else: + gt_bboxes = np.zeros((0, 4), dtype=np.float32) + gt_labels = np.array([], dtype=np.int64) + + if gt_bboxes_ignore: + gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32) + else: + gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) + + ann = dict( + bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore) + + if with_mask: + ann['masks'] = gt_masks + # poly format is not used in the current implementation + ann['mask_polys'] = gt_mask_polys + ann['poly_lens'] = gt_poly_lens + return ann diff --git a/mmdet/datasets/concat_dataset.py b/mmdet/datasets/concat_dataset.py new file mode 100644 index 0000000..195420a --- /dev/null +++ b/mmdet/datasets/concat_dataset.py @@ -0,0 +1,22 @@ +import numpy as np +from torch.utils.data.dataset import ConcatDataset as _ConcatDataset + + +class ConcatDataset(_ConcatDataset): + """A wrapper of concatenated dataset. + + Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but + concat the group flag for image aspect ratio. + + Args: + datasets (list[:obj:`Dataset`]): A list of datasets. 
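+
+    The aspect-ratio group flags of the child datasets (if present) are also
+    concatenated, so GroupSampler keeps working on the combined dataset.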
+ """ + + def __init__(self, datasets): + super(ConcatDataset, self).__init__(datasets) + self.CLASSES = datasets[0].CLASSES + if hasattr(datasets[0], 'flag'): + flags = [] + for i in range(0, len(datasets)): + flags.append(datasets[i].flag) + self.flag = np.concatenate(flags) diff --git a/mmdet/datasets/custom.py b/mmdet/datasets/custom.py new file mode 100644 index 0000000..9bf4731 --- /dev/null +++ b/mmdet/datasets/custom.py @@ -0,0 +1,322 @@ +import os.path as osp + +import mmcv +import numpy as np +from mmcv.parallel import DataContainer as DC +from torch.utils.data import Dataset + +from .transforms import (ImageTransform, BboxTransform, MaskTransform, + SegMapTransform, Numpy2Tensor) +from .utils import to_tensor, random_scale +from .extra_aug import ExtraAugmentation + + +class CustomDataset(Dataset): + """Custom dataset for detection. + + Annotation format: + [ + { + 'filename': 'a.jpg', + 'width': 1280, + 'height': 720, + 'ann': { + 'bboxes': (n, 4), + 'labels': (n, ), + 'bboxes_ignore': (k, 4), + 'labels_ignore': (k, 4) (optional field) + } + }, + ... + ] + + The `ann` field is optional for testing. + """ + + CLASSES = None + + def __init__(self, + ann_file, + img_prefix, + img_scale, + img_norm_cfg, + multiscale_mode='value', + size_divisor=None, + proposal_file=None, + num_max_proposals=1000, + flip_ratio=0, + with_mask=True, + with_crowd=True, + with_label=True, + with_semantic_seg=False, + seg_prefix=None, + seg_scale_factor=1, + extra_aug=None, + resize_keep_ratio=True, + test_mode=False): + # prefix of images path + self.img_prefix = img_prefix + + # load annotations (and proposals) + self.img_infos = self.load_annotations(ann_file) + if proposal_file is not None: + self.proposals = self.load_proposals(proposal_file) + else: + self.proposals = None + # filter images with no annotation during training + if not test_mode: + valid_inds = self._filter_imgs() + self.img_infos = [self.img_infos[i] for i in valid_inds] + if self.proposals is not None: + self.proposals = [self.proposals[i] for i in valid_inds] + + # (long_edge, short_edge) or [(long1, short1), (long2, short2), ...] + self.img_scales = img_scale if isinstance(img_scale, + list) else [img_scale] + assert mmcv.is_list_of(self.img_scales, tuple) + # normalization configs + self.img_norm_cfg = img_norm_cfg + + # multi-scale mode (only applicable for multi-scale training) + self.multiscale_mode = multiscale_mode + assert multiscale_mode in ['value', 'range'] + + # max proposals per image + self.num_max_proposals = num_max_proposals + # flip ratio + self.flip_ratio = flip_ratio + assert flip_ratio >= 0 and flip_ratio <= 1 + # padding border to ensure the image size can be divided by + # size_divisor (used for FPN) + self.size_divisor = size_divisor + + # with mask or not (reserved field, takes no effect) + self.with_mask = with_mask + # some datasets provide bbox annotations as ignore/crowd/difficult, + # if `with_crowd` is True, then these info is returned. 
+ self.with_crowd = with_crowd + # with label is False for RPN + self.with_label = with_label + # with semantic segmentation (stuff) annotation or not + self.with_seg = with_semantic_seg + # prefix of semantic segmentation map path + self.seg_prefix = seg_prefix + # rescale factor for segmentation maps + self.seg_scale_factor = seg_scale_factor + # in test mode or not + self.test_mode = test_mode + + # set group flag for the sampler + if not self.test_mode: + self._set_group_flag() + # transforms + self.img_transform = ImageTransform( + size_divisor=self.size_divisor, **self.img_norm_cfg) + self.bbox_transform = BboxTransform() + self.mask_transform = MaskTransform() + self.seg_transform = SegMapTransform(self.size_divisor) + self.numpy2tensor = Numpy2Tensor() + + # if use extra augmentation + if extra_aug is not None: + self.extra_aug = ExtraAugmentation(**extra_aug) + else: + self.extra_aug = None + + # image rescale if keep ratio + self.resize_keep_ratio = resize_keep_ratio + + def __len__(self): + return len(self.img_infos) + + def load_annotations(self, ann_file): + return mmcv.load(ann_file) + + def load_proposals(self, proposal_file): + return mmcv.load(proposal_file) + + def get_ann_info(self, idx): + return self.img_infos[idx]['ann'] + + def _filter_imgs(self, min_size=32): + """Filter images too small.""" + valid_inds = [] + for i, img_info in enumerate(self.img_infos): + if min(img_info['width'], img_info['height']) >= min_size: + valid_inds.append(i) + return valid_inds + + def _set_group_flag(self): + """Set flag according to image aspect ratio. + + Images with aspect ratio greater than 1 will be set as group 1, + otherwise group 0. + """ + self.flag = np.zeros(len(self), dtype=np.uint8) + for i in range(len(self)): + img_info = self.img_infos[i] + if img_info['width'] / img_info['height'] > 1: + self.flag[i] = 1 + + def _rand_another(self, idx): + pool = np.where(self.flag == self.flag[idx])[0] + return np.random.choice(pool) + + def __getitem__(self, idx): + if self.test_mode: + return self.prepare_test_img(idx) + while True: + data = self.prepare_train_img(idx) + if data is None: + idx = self._rand_another(idx) + continue + return data + + def prepare_train_img(self, idx): + img_info = self.img_infos[idx] + # load image + img = mmcv.imread(osp.join(self.img_prefix, img_info['filename'])) + # load proposals if necessary + if self.proposals is not None: + proposals = self.proposals[idx][:self.num_max_proposals] + # TODO: Handle empty proposals properly. Currently images with + # no proposals are just ignored, but they can be used for + # training in concept. 
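+ # Returning None here (and below, when no valid gt bbox is left)
+ # makes `__getitem__` resample another image from the same
+ # aspect-ratio group.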
+ if len(proposals) == 0: + return None + if not (proposals.shape[1] == 4 or proposals.shape[1] == 5): + raise AssertionError( + 'proposals should have shapes (n, 4) or (n, 5), ' + 'but found {}'.format(proposals.shape)) + if proposals.shape[1] == 5: + scores = proposals[:, 4, None] + proposals = proposals[:, :4] + else: + scores = None + + ann = self.get_ann_info(idx) + gt_bboxes = ann['bboxes'] + gt_labels = ann['labels'] + if self.with_crowd: + gt_bboxes_ignore = ann['bboxes_ignore'] + + # skip the image if there is no valid gt bbox + if len(gt_bboxes) == 0: + return None + + # extra augmentation + if self.extra_aug is not None: + img, gt_bboxes, gt_labels = self.extra_aug(img, gt_bboxes, + gt_labels) + + # apply transforms + flip = True if np.random.rand() < self.flip_ratio else False + # randomly sample a scale + img_scale = random_scale(self.img_scales, self.multiscale_mode) + img, img_shape, pad_shape, scale_factor = self.img_transform( + img, img_scale, flip, keep_ratio=self.resize_keep_ratio) + img = img.copy() + if self.with_seg: + gt_seg = mmcv.imread( + osp.join(self.seg_prefix, img_info['file_name'].replace( + 'jpg', 'png')), + flag='unchanged') + gt_seg = self.seg_transform(gt_seg.squeeze(), img_scale, flip) + gt_seg = mmcv.imrescale( + gt_seg, self.seg_scale_factor, interpolation='nearest') + gt_seg = gt_seg[None, ...] + if self.proposals is not None: + proposals = self.bbox_transform(proposals, img_shape, scale_factor, + flip) + proposals = np.hstack( + [proposals, scores]) if scores is not None else proposals + gt_bboxes = self.bbox_transform(gt_bboxes, img_shape, scale_factor, + flip) + if self.with_crowd: + gt_bboxes_ignore = self.bbox_transform(gt_bboxes_ignore, img_shape, + scale_factor, flip) + if self.with_mask: + gt_masks = self.mask_transform(ann['masks'], pad_shape, + scale_factor, flip) + + ori_shape = (img_info['height'], img_info['width'], 3) + img_meta = dict( + ori_shape=ori_shape, + img_shape=img_shape, + pad_shape=pad_shape, + scale_factor=scale_factor, + flip=flip) + + data = dict( + img=DC(to_tensor(img), stack=True), + img_meta=DC(img_meta, cpu_only=True), + gt_bboxes=DC(to_tensor(gt_bboxes))) + if self.proposals is not None: + data['proposals'] = DC(to_tensor(proposals)) + if self.with_label: + data['gt_labels'] = DC(to_tensor(gt_labels)) + if self.with_crowd: + data['gt_bboxes_ignore'] = DC(to_tensor(gt_bboxes_ignore)) + if self.with_mask: + data['gt_masks'] = DC(gt_masks, cpu_only=True) + if self.with_seg: + data['gt_semantic_seg'] = DC(to_tensor(gt_seg), stack=True) + return data + + def prepare_test_img(self, idx): + """Prepare an image for testing (multi-scale and flipping)""" + img_info = self.img_infos[idx] + img = mmcv.imread(osp.join(self.img_prefix, img_info['filename'])) + if self.proposals is not None: + proposal = self.proposals[idx][:self.num_max_proposals] + if not (proposal.shape[1] == 4 or proposal.shape[1] == 5): + raise AssertionError( + 'proposals should have shapes (n, 4) or (n, 5), ' + 'but found {}'.format(proposal.shape)) + else: + proposal = None + + def prepare_single(img, scale, flip, proposal=None): + _img, img_shape, pad_shape, scale_factor = self.img_transform( + img, scale, flip, keep_ratio=self.resize_keep_ratio) + _img = to_tensor(_img) + _img_meta = dict( + ori_shape=(img_info['height'], img_info['width'], 3), + img_shape=img_shape, + pad_shape=pad_shape, + scale_factor=scale_factor, + flip=flip) + if proposal is not None: + if proposal.shape[1] == 5: + score = proposal[:, 4, None] + proposal = proposal[:, :4] + else: + 
score = None + _proposal = self.bbox_transform(proposal, img_shape, + scale_factor, flip) + _proposal = np.hstack( + [_proposal, score]) if score is not None else _proposal + _proposal = to_tensor(_proposal) + else: + _proposal = None + return _img, _img_meta, _proposal + + imgs = [] + img_metas = [] + proposals = [] + for scale in self.img_scales: + _img, _img_meta, _proposal = prepare_single( + img, scale, False, proposal) + imgs.append(_img) + img_metas.append(DC(_img_meta, cpu_only=True)) + proposals.append(_proposal) + if self.flip_ratio > 0: + _img, _img_meta, _proposal = prepare_single( + img, scale, True, proposal) + imgs.append(_img) + img_metas.append(DC(_img_meta, cpu_only=True)) + proposals.append(_proposal) + data = dict(img=imgs, img_meta=img_metas) + if self.proposals is not None: + data['proposals'] = proposals + return data diff --git a/mmdet/datasets/extra_aug.py b/mmdet/datasets/extra_aug.py new file mode 100644 index 0000000..a9f4f44 --- /dev/null +++ b/mmdet/datasets/extra_aug.py @@ -0,0 +1,163 @@ +import mmcv +import numpy as np +from numpy import random + +from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps + + +class PhotoMetricDistortion(object): + + def __init__(self, + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18): + self.brightness_delta = brightness_delta + self.contrast_lower, self.contrast_upper = contrast_range + self.saturation_lower, self.saturation_upper = saturation_range + self.hue_delta = hue_delta + + def __call__(self, img, boxes, labels): + # random brightness + if random.randint(2): + delta = random.uniform(-self.brightness_delta, + self.brightness_delta) + img += delta + + # mode == 0 --> do random contrast first + # mode == 1 --> do random contrast last + mode = random.randint(2) + if mode == 1: + if random.randint(2): + alpha = random.uniform(self.contrast_lower, + self.contrast_upper) + img *= alpha + + # convert color from BGR to HSV + img = mmcv.bgr2hsv(img) + + # random saturation + if random.randint(2): + img[..., 1] *= random.uniform(self.saturation_lower, + self.saturation_upper) + + # random hue + if random.randint(2): + img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta) + img[..., 0][img[..., 0] > 360] -= 360 + img[..., 0][img[..., 0] < 0] += 360 + + # convert color from HSV to BGR + img = mmcv.hsv2bgr(img) + + # random contrast + if mode == 0: + if random.randint(2): + alpha = random.uniform(self.contrast_lower, + self.contrast_upper) + img *= alpha + + # randomly swap channels + if random.randint(2): + img = img[..., random.permutation(3)] + + return img, boxes, labels + + +class Expand(object): + + def __init__(self, mean=(0, 0, 0), to_rgb=True, ratio_range=(1, 4)): + if to_rgb: + self.mean = mean[::-1] + else: + self.mean = mean + self.min_ratio, self.max_ratio = ratio_range + + def __call__(self, img, boxes, labels): + if random.randint(2): + return img, boxes, labels + + h, w, c = img.shape + ratio = random.uniform(self.min_ratio, self.max_ratio) + expand_img = np.full((int(h * ratio), int(w * ratio), c), + self.mean).astype(img.dtype) + left = int(random.uniform(0, w * ratio - w)) + top = int(random.uniform(0, h * ratio - h)) + expand_img[top:top + h, left:left + w] = img + img = expand_img + boxes += np.tile((left, top), 2) + return img, boxes, labels + + +class RandomCrop(object): + + def __init__(self, min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3): + # 1: return ori img + self.sample_mode = (1, *min_ious, 0) + self.min_crop_size = min_crop_size + 
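+ # Sampling strategy (SSD-style min-IoU random crop): each call first
+ # picks a mode from `sample_mode`; mode 1 returns the original image
+ # unchanged, mode 0 places no IoU constraint on the crop, and the
+ # remaining values are minimum IoU thresholds between the crop patch
+ # and the gt boxes. Up to 50 crops are tried per picked mode before
+ # another mode is drawn.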
+ def __call__(self, img, boxes, labels): + h, w, c = img.shape + while True: + mode = random.choice(self.sample_mode) + if mode == 1: + return img, boxes, labels + + min_iou = mode + for i in range(50): + new_w = random.uniform(self.min_crop_size * w, w) + new_h = random.uniform(self.min_crop_size * h, h) + + # h / w in [0.5, 2] + if new_h / new_w < 0.5 or new_h / new_w > 2: + continue + + left = random.uniform(w - new_w) + top = random.uniform(h - new_h) + + patch = np.array((int(left), int(top), int(left + new_w), + int(top + new_h))) + overlaps = bbox_overlaps( + patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1) + if overlaps.min() < min_iou: + continue + + # center of boxes should inside the crop img + center = (boxes[:, :2] + boxes[:, 2:]) / 2 + mask = (center[:, 0] > patch[0]) * ( + center[:, 1] > patch[1]) * (center[:, 0] < patch[2]) * ( + center[:, 1] < patch[3]) + if not mask.any(): + continue + boxes = boxes[mask] + labels = labels[mask] + + # adjust boxes + img = img[patch[1]:patch[3], patch[0]:patch[2]] + boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:]) + boxes[:, :2] = boxes[:, :2].clip(min=patch[:2]) + boxes -= np.tile(patch[:2], 2) + + return img, boxes, labels + + +class ExtraAugmentation(object): + + def __init__(self, + photo_metric_distortion=None, + expand=None, + random_crop=None): + self.transforms = [] + if photo_metric_distortion is not None: + self.transforms.append( + PhotoMetricDistortion(**photo_metric_distortion)) + if expand is not None: + self.transforms.append(Expand(**expand)) + if random_crop is not None: + self.transforms.append(RandomCrop(**random_crop)) + + def __call__(self, img, boxes, labels): + img = img.astype(np.float32) + for transform in self.transforms: + img, boxes, labels = transform(img, boxes, labels) + return img, boxes, labels diff --git a/mmdet/datasets/loader/__init__.py b/mmdet/datasets/loader/__init__.py new file mode 100644 index 0000000..1f23fa4 --- /dev/null +++ b/mmdet/datasets/loader/__init__.py @@ -0,0 +1,4 @@ +from .build_loader import build_dataloader +from .sampler import GroupSampler, DistributedGroupSampler + +__all__ = ['GroupSampler', 'DistributedGroupSampler', 'build_dataloader'] diff --git a/mmdet/datasets/loader/build_loader.py b/mmdet/datasets/loader/build_loader.py new file mode 100644 index 0000000..8759b06 --- /dev/null +++ b/mmdet/datasets/loader/build_loader.py @@ -0,0 +1,46 @@ +from functools import partial + +from mmcv.runner import get_dist_info +from mmcv.parallel import collate +from torch.utils.data import DataLoader + +from .sampler import GroupSampler, DistributedGroupSampler, DistributedSampler + +# https://github.com/pytorch/pytorch/issues/973 +import resource +rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) +resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1])) + + +def build_dataloader(dataset, + imgs_per_gpu, + workers_per_gpu, + num_gpus=1, + dist=True, + **kwargs): + shuffle = kwargs.get('shuffle', True) + if dist: + rank, world_size = get_dist_info() + if shuffle: + sampler = DistributedGroupSampler(dataset, imgs_per_gpu, + world_size, rank) + else: + sampler = DistributedSampler( + dataset, world_size, rank, shuffle=False) + batch_size = imgs_per_gpu + num_workers = workers_per_gpu + else: + sampler = GroupSampler(dataset, imgs_per_gpu) if shuffle else None + batch_size = num_gpus * imgs_per_gpu + num_workers = num_gpus * workers_per_gpu + + data_loader = DataLoader( + dataset, + batch_size=batch_size, + sampler=sampler, + num_workers=num_workers, + collate_fn=partial(collate, 
samples_per_gpu=imgs_per_gpu), + pin_memory=False, + **kwargs) + + return data_loader diff --git a/mmdet/datasets/loader/sampler.py b/mmdet/datasets/loader/sampler.py new file mode 100644 index 0000000..1e454b7 --- /dev/null +++ b/mmdet/datasets/loader/sampler.py @@ -0,0 +1,159 @@ +from __future__ import division + +import math +import torch +import numpy as np + +from torch.distributed import get_world_size, get_rank +from torch.utils.data import Sampler +from torch.utils.data import DistributedSampler as _DistributedSampler + + +class DistributedSampler(_DistributedSampler): + + def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True): + super().__init__(dataset, num_replicas=num_replicas, rank=rank) + self.shuffle = shuffle + + def __iter__(self): + # deterministically shuffle based on epoch + if self.shuffle: + g = torch.Generator() + g.manual_seed(self.epoch) + indices = torch.randperm(len(self.dataset), generator=g).tolist() + else: + indices = torch.arange(len(self.dataset)).tolist() + + # add extra samples to make it evenly divisible + indices += indices[:(self.total_size - len(indices))] + assert len(indices) == self.total_size + + # subsample + indices = indices[self.rank:self.total_size:self.num_replicas] + assert len(indices) == self.num_samples + + return iter(indices) + + +class GroupSampler(Sampler): + + def __init__(self, dataset, samples_per_gpu=1): + assert hasattr(dataset, 'flag') + self.dataset = dataset + self.samples_per_gpu = samples_per_gpu + self.flag = dataset.flag.astype(np.int64) + self.group_sizes = np.bincount(self.flag) + self.num_samples = 0 + for i, size in enumerate(self.group_sizes): + self.num_samples += int(np.ceil( + size / self.samples_per_gpu)) * self.samples_per_gpu + + def __iter__(self): + indices = [] + for i, size in enumerate(self.group_sizes): + if size == 0: + continue + indice = np.where(self.flag == i)[0] + assert len(indice) == size + np.random.shuffle(indice) + num_extra = int(np.ceil(size / self.samples_per_gpu) + ) * self.samples_per_gpu - len(indice) + indice = np.concatenate([indice, indice[:num_extra]]) + indices.append(indice) + indices = np.concatenate(indices) + indices = [ + indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu] + for i in np.random.permutation( + range(len(indices) // self.samples_per_gpu)) + ] + indices = np.concatenate(indices) + indices = torch.from_numpy(indices).long() + assert len(indices) == self.num_samples + return iter(indices) + + def __len__(self): + return self.num_samples + + +class DistributedGroupSampler(Sampler): + """Sampler that restricts data loading to a subset of the dataset. + It is especially useful in conjunction with + :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each + process can pass a DistributedSampler instance as a DataLoader sampler, + and load a subset of the original dataset that is exclusive to it. + .. note:: + Dataset is assumed to be of constant size. + Arguments: + dataset: Dataset used for sampling. + num_replicas (optional): Number of processes participating in + distributed training. + rank (optional): Rank of the current process within num_replicas. 
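+
+ Unlike the plain DistributedSampler above, indices are additionally
+ grouped by the dataset `flag` (image aspect ratio), so every chunk of
+ `samples_per_gpu` consecutive samples comes from a single group.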
+ """ + + def __init__(self, + dataset, + samples_per_gpu=1, + num_replicas=None, + rank=None): + if num_replicas is None: + num_replicas = get_world_size() + if rank is None: + rank = get_rank() + self.dataset = dataset + self.samples_per_gpu = samples_per_gpu + self.num_replicas = num_replicas + self.rank = rank + self.epoch = 0 + + assert hasattr(self.dataset, 'flag') + self.flag = self.dataset.flag + self.group_sizes = np.bincount(self.flag) + + self.num_samples = 0 + for i, j in enumerate(self.group_sizes): + self.num_samples += int( + math.ceil(self.group_sizes[i] * 1.0 / self.samples_per_gpu / + self.num_replicas)) * self.samples_per_gpu + self.total_size = self.num_samples * self.num_replicas + + def __iter__(self): + # deterministically shuffle based on epoch + g = torch.Generator() + g.manual_seed(self.epoch) + + indices = [] + for i, size in enumerate(self.group_sizes): + if size > 0: + indice = np.where(self.flag == i)[0] + assert len(indice) == size + indice = indice[list(torch.randperm(int(size), + generator=g))].tolist() + extra = int( + math.ceil( + size * 1.0 / self.samples_per_gpu / self.num_replicas) + ) * self.samples_per_gpu * self.num_replicas - len(indice) + indice += indice[:extra] + indices += indice + + assert len(indices) == self.total_size + + indices = [ + indices[j] for i in list( + torch.randperm( + len(indices) // self.samples_per_gpu, generator=g)) + for j in range(i * self.samples_per_gpu, (i + 1) * + self.samples_per_gpu) + ] + + # subsample + offset = self.num_samples * self.rank + indices = indices[offset:offset + self.num_samples] + assert len(indices) == self.num_samples + + return iter(indices) + + def __len__(self): + return self.num_samples + + def set_epoch(self, epoch): + self.epoch = epoch diff --git a/mmdet/datasets/repeat_dataset.py b/mmdet/datasets/repeat_dataset.py new file mode 100644 index 0000000..7e99293 --- /dev/null +++ b/mmdet/datasets/repeat_dataset.py @@ -0,0 +1,19 @@ +import numpy as np + + +class RepeatDataset(object): + + def __init__(self, dataset, times): + self.dataset = dataset + self.times = times + self.CLASSES = dataset.CLASSES + if hasattr(self.dataset, 'flag'): + self.flag = np.tile(self.dataset.flag, times) + + self._ori_len = len(self.dataset) + + def __getitem__(self, idx): + return self.dataset[idx % self._ori_len] + + def __len__(self): + return self.times * self._ori_len diff --git a/mmdet/datasets/transforms.py b/mmdet/datasets/transforms.py new file mode 100644 index 0000000..ff575db --- /dev/null +++ b/mmdet/datasets/transforms.py @@ -0,0 +1,147 @@ +import mmcv +import numpy as np +import torch + +__all__ = [ + 'ImageTransform', 'BboxTransform', 'MaskTransform', 'SegMapTransform', + 'Numpy2Tensor' +] + + +class ImageTransform(object): + """Preprocess an image. + + 1. rescale the image to expected size + 2. normalize the image + 3. flip the image (if needed) + 4. pad the image (if needed) + 5. 
transpose to (c, h, w) + """ + + def __init__(self, + mean=(0, 0, 0), + std=(1, 1, 1), + to_rgb=True, + size_divisor=None): + self.mean = np.array(mean, dtype=np.float32) + self.std = np.array(std, dtype=np.float32) + self.to_rgb = to_rgb + self.size_divisor = size_divisor + + def __call__(self, img, scale, flip=False, keep_ratio=True): + if keep_ratio: + img, scale_factor = mmcv.imrescale(img, scale, return_scale=True) + else: + img, w_scale, h_scale = mmcv.imresize( + img, scale, return_scale=True) + scale_factor = np.array( + [w_scale, h_scale, w_scale, h_scale], dtype=np.float32) + img_shape = img.shape + img = mmcv.imnormalize(img, self.mean, self.std, self.to_rgb) + if flip: + img = mmcv.imflip(img) + if self.size_divisor is not None: + img = mmcv.impad_to_multiple(img, self.size_divisor) + pad_shape = img.shape + else: + pad_shape = img_shape + img = img.transpose(2, 0, 1) + return img, img_shape, pad_shape, scale_factor + + +def bbox_flip(bboxes, img_shape): + """Flip bboxes horizontally. + + Args: + bboxes(ndarray): shape (..., 4*k) + img_shape(tuple): (height, width) + """ + assert bboxes.shape[-1] % 4 == 0 + w = img_shape[1] + flipped = bboxes.copy() + flipped[..., 0::4] = w - bboxes[..., 2::4] - 1 + flipped[..., 2::4] = w - bboxes[..., 0::4] - 1 + return flipped + + +class BboxTransform(object): + """Preprocess gt bboxes. + + 1. rescale bboxes according to image size + 2. flip bboxes (if needed) + 3. pad the first dimension to `max_num_gts` + """ + + def __init__(self, max_num_gts=None): + self.max_num_gts = max_num_gts + + def __call__(self, bboxes, img_shape, scale_factor, flip=False): + gt_bboxes = bboxes * scale_factor + if flip: + gt_bboxes = bbox_flip(gt_bboxes, img_shape) + gt_bboxes[:, 0::2] = np.clip(gt_bboxes[:, 0::2], 0, img_shape[1] - 1) + gt_bboxes[:, 1::2] = np.clip(gt_bboxes[:, 1::2], 0, img_shape[0] - 1) + if self.max_num_gts is None: + return gt_bboxes + else: + num_gts = gt_bboxes.shape[0] + padded_bboxes = np.zeros((self.max_num_gts, 4), dtype=np.float32) + padded_bboxes[:num_gts, :] = gt_bboxes + return padded_bboxes + + +class MaskTransform(object): + """Preprocess masks. + + 1. resize masks to expected size and stack to a single array + 2. flip the masks (if needed) + 3. pad the masks (if needed) + """ + + def __call__(self, masks, pad_shape, scale_factor, flip=False): + masks = [ + mmcv.imrescale(mask, scale_factor, interpolation='nearest') + for mask in masks + ] + if flip: + masks = [mask[:, ::-1] for mask in masks] + padded_masks = [ + mmcv.impad(mask, pad_shape[:2], pad_val=0) for mask in masks + ] + padded_masks = np.stack(padded_masks, axis=0) + return padded_masks + + +class SegMapTransform(object): + """Preprocess semantic segmentation maps. + + 1. rescale the segmentation map to expected size + 3. flip the image (if needed) + 4. 
pad the image (if needed) + """ + + def __init__(self, size_divisor=None): + self.size_divisor = size_divisor + + def __call__(self, img, scale, flip=False, keep_ratio=True): + if keep_ratio: + img = mmcv.imrescale(img, scale, interpolation='nearest') + else: + img = mmcv.imresize(img, scale, interpolation='nearest') + if flip: + img = mmcv.imflip(img) + if self.size_divisor is not None: + img = mmcv.impad_to_multiple(img, self.size_divisor) + return img + + +class Numpy2Tensor(object): + + def __init__(self): + pass + + def __call__(self, *args): + if len(args) == 1: + return torch.from_numpy(args[0]) + else: + return tuple([torch.from_numpy(np.array(array)) for array in args]) diff --git a/mmdet/datasets/utils.py b/mmdet/datasets/utils.py new file mode 100644 index 0000000..8fdba7f --- /dev/null +++ b/mmdet/datasets/utils.py @@ -0,0 +1,116 @@ +import copy +from collections import Sequence + +import mmcv +from mmcv.runner import obj_from_dict +import torch + +import matplotlib.pyplot as plt +import numpy as np +from .concat_dataset import ConcatDataset +from .repeat_dataset import RepeatDataset +from .. import datasets + + +def to_tensor(data): + """Convert objects of various python types to :obj:`torch.Tensor`. + + Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`, + :class:`Sequence`, :class:`int` and :class:`float`. + """ + if isinstance(data, torch.Tensor): + return data + elif isinstance(data, np.ndarray): + return torch.from_numpy(data) + elif isinstance(data, Sequence) and not mmcv.is_str(data): + return torch.tensor(data) + elif isinstance(data, int): + return torch.LongTensor([data]) + elif isinstance(data, float): + return torch.FloatTensor([data]) + else: + raise TypeError('type {} cannot be converted to tensor.'.format( + type(data))) + + +def random_scale(img_scales, mode='range'): + """Randomly select a scale from a list of scales or scale ranges. + + Args: + img_scales (list[tuple]): Image scale or scale range. + mode (str): "range" or "value". + + Returns: + tuple: Sampled image scale. 
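+
+ Note: "range" mode requires exactly two scales; the long and short
+ edges are then sampled independently from the ranges they span.
+ "value" mode simply picks one of the given scales at random.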
+ """ + num_scales = len(img_scales) + if num_scales == 1: # fixed scale is specified + img_scale = img_scales[0] + elif num_scales == 2: # randomly sample a scale + if mode == 'range': + img_scale_long = [max(s) for s in img_scales] + img_scale_short = [min(s) for s in img_scales] + long_edge = np.random.randint( + min(img_scale_long), + max(img_scale_long) + 1) + short_edge = np.random.randint( + min(img_scale_short), + max(img_scale_short) + 1) + img_scale = (long_edge, short_edge) + elif mode == 'value': + img_scale = img_scales[np.random.randint(num_scales)] + else: + if mode != 'value': + raise ValueError( + 'Only "value" mode supports more than 2 image scales') + img_scale = img_scales[np.random.randint(num_scales)] + return img_scale + + +def show_ann(coco, img, ann_info): + plt.imshow(mmcv.bgr2rgb(img)) + plt.axis('off') + coco.showAnns(ann_info) + plt.show() + + +def get_dataset(data_cfg): + if data_cfg['type'] == 'RepeatDataset': + return RepeatDataset( + get_dataset(data_cfg['dataset']), data_cfg['times']) + + if isinstance(data_cfg['ann_file'], (list, tuple)): + ann_files = data_cfg['ann_file'] + num_dset = len(ann_files) + else: + ann_files = [data_cfg['ann_file']] + num_dset = 1 + + if 'proposal_file' in data_cfg.keys(): + if isinstance(data_cfg['proposal_file'], (list, tuple)): + proposal_files = data_cfg['proposal_file'] + else: + proposal_files = [data_cfg['proposal_file']] + else: + proposal_files = [None] * num_dset + assert len(proposal_files) == num_dset + + if isinstance(data_cfg['img_prefix'], (list, tuple)): + img_prefixes = data_cfg['img_prefix'] + else: + img_prefixes = [data_cfg['img_prefix']] * num_dset + assert len(img_prefixes) == num_dset + + dsets = [] + for i in range(num_dset): + data_info = copy.deepcopy(data_cfg) + data_info['ann_file'] = ann_files[i] + data_info['proposal_file'] = proposal_files[i] + data_info['img_prefix'] = img_prefixes[i] + dset = obj_from_dict(data_info, datasets) + dsets.append(dset) + if len(dsets) > 1: + dset = ConcatDataset(dsets) + else: + dset = dsets[0] + return dset diff --git a/mmdet/datasets/voc.py b/mmdet/datasets/voc.py new file mode 100644 index 0000000..ba1c772 --- /dev/null +++ b/mmdet/datasets/voc.py @@ -0,0 +1,18 @@ +from .xml_style import XMLDataset + + +class VOCDataset(XMLDataset): + + CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', + 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', + 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', + 'tvmonitor') + + def __init__(self, **kwargs): + super(VOCDataset, self).__init__(**kwargs) + if 'VOC2007' in self.img_prefix: + self.year = 2007 + elif 'VOC2012' in self.img_prefix: + self.year = 2012 + else: + raise ValueError('Cannot infer dataset year from img_prefix') diff --git a/mmdet/datasets/xml_style.py b/mmdet/datasets/xml_style.py new file mode 100644 index 0000000..40e3374 --- /dev/null +++ b/mmdet/datasets/xml_style.py @@ -0,0 +1,76 @@ +import os.path as osp +import xml.etree.ElementTree as ET + +import mmcv +import numpy as np + +from .custom import CustomDataset + + +class XMLDataset(CustomDataset): + + def __init__(self, **kwargs): + super(XMLDataset, self).__init__(**kwargs) + self.cat2label = {cat: i + 1 for i, cat in enumerate(self.CLASSES)} + + def load_annotations(self, ann_file): + img_infos = [] + img_ids = mmcv.list_from_file(ann_file) + for img_id in img_ids: + filename = 'JPEGImages/{}.jpg'.format(img_id) + xml_path = osp.join(self.img_prefix, 'Annotations', + '{}.xml'.format(img_id)) + tree = 
ET.parse(xml_path) + root = tree.getroot() + size = root.find('size') + width = int(size.find('width').text) + height = int(size.find('height').text) + img_infos.append( + dict(id=img_id, filename=filename, width=width, height=height)) + return img_infos + + def get_ann_info(self, idx): + img_id = self.img_infos[idx]['id'] + xml_path = osp.join(self.img_prefix, 'Annotations', + '{}.xml'.format(img_id)) + tree = ET.parse(xml_path) + root = tree.getroot() + bboxes = [] + labels = [] + bboxes_ignore = [] + labels_ignore = [] + for obj in root.findall('object'): + name = obj.find('name').text + label = self.cat2label[name] + difficult = int(obj.find('difficult').text) + bnd_box = obj.find('bndbox') + bbox = [ + int(bnd_box.find('xmin').text), + int(bnd_box.find('ymin').text), + int(bnd_box.find('xmax').text), + int(bnd_box.find('ymax').text) + ] + if difficult: + bboxes_ignore.append(bbox) + labels_ignore.append(label) + else: + bboxes.append(bbox) + labels.append(label) + if not bboxes: + bboxes = np.zeros((0, 4)) + labels = np.zeros((0, )) + else: + bboxes = np.array(bboxes, ndmin=2) - 1 + labels = np.array(labels) + if not bboxes_ignore: + bboxes_ignore = np.zeros((0, 4)) + labels_ignore = np.zeros((0, )) + else: + bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1 + labels_ignore = np.array(labels_ignore) + ann = dict( + bboxes=bboxes.astype(np.float32), + labels=labels.astype(np.int64), + bboxes_ignore=bboxes_ignore.astype(np.float32), + labels_ignore=labels_ignore.astype(np.int64)) + return ann diff --git a/mmdet/models/__init__.py b/mmdet/models/__init__.py new file mode 100644 index 0000000..7925e6a --- /dev/null +++ b/mmdet/models/__init__.py @@ -0,0 +1,18 @@ +from .backbones import * # noqa: F401,F403 +from .necks import * # noqa: F401,F403 +from .roi_extractors import * # noqa: F401,F403 +from .anchor_heads import * # noqa: F401,F403 +from .shared_heads import * # noqa: F401,F403 +from .bbox_heads import * # noqa: F401,F403 +from .mask_heads import * # noqa: F401,F403 +from .detectors import * # noqa: F401,F403 +from .registry import (BACKBONES, NECKS, ROI_EXTRACTORS, SHARED_HEADS, HEADS, + DETECTORS) +from .builder import (build_backbone, build_neck, build_roi_extractor, + build_shared_head, build_head, build_detector) + +__all__ = [ + 'BACKBONES', 'NECKS', 'ROI_EXTRACTORS', 'SHARED_HEADS', 'HEADS', + 'DETECTORS', 'build_backbone', 'build_neck', 'build_roi_extractor', + 'build_shared_head', 'build_head', 'build_detector' +] diff --git a/mmdet/models/anchor_heads/__init__.py b/mmdet/models/anchor_heads/__init__.py new file mode 100644 index 0000000..86877a2 --- /dev/null +++ b/mmdet/models/anchor_heads/__init__.py @@ -0,0 +1,7 @@ +from .anchor_head import AnchorHead +from .fcos_head import FCOSHead +from .retina_head import RetinaHead +from .rpn_head import RPNHead +from .ssd_head import SSDHead + +__all__ = ['AnchorHead', 'RPNHead', 'RetinaHead', 'SSDHead', 'FCOSHead'] diff --git a/mmdet/models/anchor_heads/anchor_head.py b/mmdet/models/anchor_heads/anchor_head.py new file mode 100644 index 0000000..881415b --- /dev/null +++ b/mmdet/models/anchor_heads/anchor_head.py @@ -0,0 +1,284 @@ +from __future__ import division + +import numpy as np +import torch +import torch.nn as nn +from mmcv.cnn import normal_init + +from mmdet.core import (AnchorGenerator, anchor_target, delta2bbox, + multi_apply, weighted_cross_entropy, weighted_smoothl1, + weighted_binary_cross_entropy, + weighted_sigmoid_focal_loss, multiclass_nms) +from ..registry import HEADS + + +@HEADS.register_module +class 
AnchorHead(nn.Module): + """Anchor-based head (RPN, RetinaNet, SSD, etc.). + + Args: + in_channels (int): Number of channels in the input feature map. + feat_channels (int): Number of channels of the feature map. + anchor_scales (Iterable): Anchor scales. + anchor_ratios (Iterable): Anchor aspect ratios. + anchor_strides (Iterable): Anchor strides. + anchor_base_sizes (Iterable): Anchor base sizes. + target_means (Iterable): Mean values of regression targets. + target_stds (Iterable): Std values of regression targets. + use_sigmoid_cls (bool): Whether to use sigmoid loss for + classification. (softmax by default) + cls_focal_loss (bool): Whether to use focal loss for classification. + """ # noqa: W605 + + def __init__(self, + num_classes, + in_channels, + feat_channels=256, + anchor_scales=[8, 16, 32], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + anchor_base_sizes=None, + target_means=(.0, .0, .0, .0), + target_stds=(1.0, 1.0, 1.0, 1.0), + use_sigmoid_cls=False, + cls_focal_loss=False): + super(AnchorHead, self).__init__() + self.in_channels = in_channels + self.num_classes = num_classes + self.feat_channels = feat_channels + self.anchor_scales = anchor_scales + self.anchor_ratios = anchor_ratios + self.anchor_strides = anchor_strides + self.anchor_base_sizes = list( + anchor_strides) if anchor_base_sizes is None else anchor_base_sizes + self.target_means = target_means + self.target_stds = target_stds + self.use_sigmoid_cls = use_sigmoid_cls + self.cls_focal_loss = cls_focal_loss + + self.anchor_generators = [] + for anchor_base in self.anchor_base_sizes: + self.anchor_generators.append( + AnchorGenerator(anchor_base, anchor_scales, anchor_ratios)) + + self.num_anchors = len(self.anchor_ratios) * len(self.anchor_scales) + if self.use_sigmoid_cls: + self.cls_out_channels = self.num_classes - 1 + else: + self.cls_out_channels = self.num_classes + + self._init_layers() + + def _init_layers(self): + self.conv_cls = nn.Conv2d(self.feat_channels, + self.num_anchors * self.cls_out_channels, 1) + self.conv_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 1) + + def init_weights(self): + normal_init(self.conv_cls, std=0.01) + normal_init(self.conv_reg, std=0.01) + + def forward_single(self, x): + cls_score = self.conv_cls(x) + bbox_pred = self.conv_reg(x) + return cls_score, bbox_pred + + def forward(self, feats): + return multi_apply(self.forward_single, feats) + + def get_anchors(self, featmap_sizes, img_metas): + """Get anchors according to feature map sizes. + + Args: + featmap_sizes (list[tuple]): Multi-level feature map sizes. + img_metas (list[dict]): Image meta info. 
+ + Returns: + tuple: anchors of each image, valid flags of each image + """ + num_imgs = len(img_metas) + num_levels = len(featmap_sizes) + + # since feature map sizes of all images are the same, we only compute + # anchors for one time + multi_level_anchors = [] + for i in range(num_levels): + anchors = self.anchor_generators[i].grid_anchors( + featmap_sizes[i], self.anchor_strides[i]) + multi_level_anchors.append(anchors) + anchor_list = [multi_level_anchors for _ in range(num_imgs)] + + # for each image, we compute valid flags of multi level anchors + valid_flag_list = [] + for img_id, img_meta in enumerate(img_metas): + multi_level_flags = [] + for i in range(num_levels): + anchor_stride = self.anchor_strides[i] + feat_h, feat_w = featmap_sizes[i] + h, w, _ = img_meta['pad_shape'] + valid_feat_h = min(int(np.ceil(h / anchor_stride)), feat_h) + valid_feat_w = min(int(np.ceil(w / anchor_stride)), feat_w) + flags = self.anchor_generators[i].valid_flags( + (feat_h, feat_w), (valid_feat_h, valid_feat_w)) + multi_level_flags.append(flags) + valid_flag_list.append(multi_level_flags) + + return anchor_list, valid_flag_list + + def loss_single(self, cls_score, bbox_pred, labels, label_weights, + bbox_targets, bbox_weights, num_total_samples, cfg): + # classification loss + labels = labels.reshape(-1) + label_weights = label_weights.reshape(-1) + cls_score = cls_score.permute(0, 2, 3, 1).reshape( + -1, self.cls_out_channels) + if self.use_sigmoid_cls: + if self.cls_focal_loss: + cls_criterion = weighted_sigmoid_focal_loss + else: + cls_criterion = weighted_binary_cross_entropy + else: + if self.cls_focal_loss: + raise NotImplementedError + else: + cls_criterion = weighted_cross_entropy + if self.cls_focal_loss: + loss_cls = cls_criterion( + cls_score, + labels, + label_weights, + gamma=cfg.gamma, + alpha=cfg.alpha, + avg_factor=num_total_samples) + else: + loss_cls = cls_criterion( + cls_score, labels, label_weights, avg_factor=num_total_samples) + # regression loss + bbox_targets = bbox_targets.reshape(-1, 4) + bbox_weights = bbox_weights.reshape(-1, 4) + bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) + loss_reg = weighted_smoothl1( + bbox_pred, + bbox_targets, + bbox_weights, + beta=cfg.smoothl1_beta, + avg_factor=num_total_samples) + return loss_cls, loss_reg + + def loss(self, + cls_scores, + bbox_preds, + gt_bboxes, + gt_labels, + img_metas, + cfg, + gt_bboxes_ignore=None): + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len(featmap_sizes) == len(self.anchor_generators) + + anchor_list, valid_flag_list = self.get_anchors( + featmap_sizes, img_metas) + sampling = False if self.cls_focal_loss else True + label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 + cls_reg_targets = anchor_target( + anchor_list, + valid_flag_list, + gt_bboxes, + img_metas, + self.target_means, + self.target_stds, + cfg, + gt_bboxes_ignore_list=gt_bboxes_ignore, + gt_labels_list=gt_labels, + label_channels=label_channels, + sampling=sampling) + if cls_reg_targets is None: + return None + (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, + num_total_pos, num_total_neg) = cls_reg_targets + num_total_samples = ( + num_total_pos + if self.cls_focal_loss else num_total_pos + num_total_neg) + losses_cls, losses_reg = multi_apply( + self.loss_single, + cls_scores, + bbox_preds, + labels_list, + label_weights_list, + bbox_targets_list, + bbox_weights_list, + num_total_samples=num_total_samples, + cfg=cfg) + return dict(loss_cls=losses_cls, 
loss_reg=losses_reg) + + def get_bboxes(self, cls_scores, bbox_preds, img_metas, cfg, + rescale=False): + assert len(cls_scores) == len(bbox_preds) + num_levels = len(cls_scores) + + mlvl_anchors = [ + self.anchor_generators[i].grid_anchors(cls_scores[i].size()[-2:], + self.anchor_strides[i]) + for i in range(num_levels) + ] + result_list = [] + for img_id in range(len(img_metas)): + cls_score_list = [ + cls_scores[i][img_id].detach() for i in range(num_levels) + ] + bbox_pred_list = [ + bbox_preds[i][img_id].detach() for i in range(num_levels) + ] + img_shape = img_metas[img_id]['img_shape'] + scale_factor = img_metas[img_id]['scale_factor'] + proposals = self.get_bboxes_single(cls_score_list, bbox_pred_list, + mlvl_anchors, img_shape, + scale_factor, cfg, rescale) + result_list.append(proposals) + return result_list + + def get_bboxes_single(self, + cls_scores, + bbox_preds, + mlvl_anchors, + img_shape, + scale_factor, + cfg, + rescale=False): + assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors) + mlvl_bboxes = [] + mlvl_scores = [] + for cls_score, bbox_pred, anchors in zip(cls_scores, bbox_preds, + mlvl_anchors): + assert cls_score.size()[-2:] == bbox_pred.size()[-2:] + cls_score = cls_score.permute(1, 2, 0).reshape( + -1, self.cls_out_channels) + if self.use_sigmoid_cls: + scores = cls_score.sigmoid() + else: + scores = cls_score.softmax(-1) + bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) + nms_pre = cfg.get('nms_pre', -1) + if nms_pre > 0 and scores.shape[0] > nms_pre: + if self.use_sigmoid_cls: + max_scores, _ = scores.max(dim=1) + else: + max_scores, _ = scores[:, 1:].max(dim=1) + _, topk_inds = max_scores.topk(nms_pre) + anchors = anchors[topk_inds, :] + bbox_pred = bbox_pred[topk_inds, :] + scores = scores[topk_inds, :] + bboxes = delta2bbox(anchors, bbox_pred, self.target_means, + self.target_stds, img_shape) + mlvl_bboxes.append(bboxes) + mlvl_scores.append(scores) + mlvl_bboxes = torch.cat(mlvl_bboxes) + if rescale: + mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) + mlvl_scores = torch.cat(mlvl_scores) + if self.use_sigmoid_cls: + padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) + mlvl_scores = torch.cat([padding, mlvl_scores], dim=1) + det_bboxes, det_labels = multiclass_nms( + mlvl_bboxes, mlvl_scores, cfg.score_thr, cfg.nms, cfg.max_per_img) + return det_bboxes, det_labels diff --git a/mmdet/models/anchor_heads/fcos_head.py b/mmdet/models/anchor_heads/fcos_head.py new file mode 100644 index 0000000..f16eb3c --- /dev/null +++ b/mmdet/models/anchor_heads/fcos_head.py @@ -0,0 +1,371 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import normal_init + +from mmdet.core import (sigmoid_focal_loss, iou_loss, multi_apply, + multiclass_nms, distance2bbox) +from ..registry import HEADS +from ..utils import bias_init_with_prob, Scale, ConvModule + +INF = 1e8 + + +@HEADS.register_module +class FCOSHead(nn.Module): + + def __init__(self, + num_classes, + in_channels, + feat_channels=256, + stacked_convs=4, + strides=(4, 8, 16, 32, 64), + regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512), + (512, INF)), + conv_cfg=None, + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)): + super(FCOSHead, self).__init__() + + self.num_classes = num_classes + self.cls_out_channels = num_classes - 1 + self.in_channels = in_channels + self.feat_channels = feat_channels + self.stacked_convs = stacked_convs + self.strides = strides + self.regress_ranges = regress_ranges + self.conv_cfg = conv_cfg + self.norm_cfg = 
norm_cfg + + self._init_layers() + + def _init_layers(self): + self.cls_convs = nn.ModuleList() + self.reg_convs = nn.ModuleList() + for i in range(self.stacked_convs): + chn = self.in_channels if i == 0 else self.feat_channels + self.cls_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + bias=self.norm_cfg is None)) + self.reg_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + bias=self.norm_cfg is None)) + self.fcos_cls = nn.Conv2d( + self.feat_channels, self.cls_out_channels, 3, padding=1) + self.fcos_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) + self.fcos_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1) + + self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides]) + + def init_weights(self): + for m in self.cls_convs: + normal_init(m.conv, std=0.01) + for m in self.reg_convs: + normal_init(m.conv, std=0.01) + bias_cls = bias_init_with_prob(0.01) + normal_init(self.fcos_cls, std=0.01, bias=bias_cls) + normal_init(self.fcos_reg, std=0.01) + normal_init(self.fcos_centerness, std=0.01) + + def forward(self, feats): + return multi_apply(self.forward_single, feats, self.scales) + + def forward_single(self, x, scale): + cls_feat = x + reg_feat = x + + for cls_layer in self.cls_convs: + cls_feat = cls_layer(cls_feat) + cls_score = self.fcos_cls(cls_feat) + centerness = self.fcos_centerness(cls_feat) + + for reg_layer in self.reg_convs: + reg_feat = reg_layer(reg_feat) + # scale the bbox_pred of different level + bbox_pred = scale(self.fcos_reg(reg_feat)).exp() + return cls_score, bbox_pred, centerness + + def loss(self, + cls_scores, + bbox_preds, + centernesses, + gt_bboxes, + gt_labels, + img_metas, + cfg, + gt_bboxes_ignore=None): + assert len(cls_scores) == len(bbox_preds) == len(centernesses) + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype, + bbox_preds[0].device) + labels, bbox_targets = self.fcos_target(all_level_points, gt_bboxes, + gt_labels) + + num_imgs = cls_scores[0].size(0) + # flatten cls_scores, bbox_preds and centerness + flatten_cls_scores = [ + cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) + for cls_score in cls_scores + ] + flatten_bbox_preds = [ + bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) + for bbox_pred in bbox_preds + ] + flatten_centerness = [ + centerness.permute(0, 2, 3, 1).reshape(-1) + for centerness in centernesses + ] + flatten_cls_scores = torch.cat(flatten_cls_scores) + flatten_bbox_preds = torch.cat(flatten_bbox_preds) + flatten_centerness = torch.cat(flatten_centerness) + flatten_labels = torch.cat(labels) + flatten_bbox_targets = torch.cat(bbox_targets) + # repeat points to align with bbox_preds + flatten_points = torch.cat( + [points.repeat(num_imgs, 1) for points in all_level_points]) + + pos_inds = flatten_labels.nonzero().reshape(-1) + num_pos = len(pos_inds) + loss_cls = sigmoid_focal_loss( + flatten_cls_scores, flatten_labels, cfg.gamma, cfg.alpha, + 'none').sum()[None] / (num_pos + num_imgs) # avoid num_pos is 0 + + pos_bbox_preds = flatten_bbox_preds[pos_inds] + pos_bbox_targets = flatten_bbox_targets[pos_inds] + pos_centerness = flatten_centerness[pos_inds] + pos_centerness_targets = self.centerness_target(pos_bbox_targets) + + if num_pos > 0: + pos_points = flatten_points[pos_inds] + pos_decoded_bbox_preds = distance2bbox(pos_points, 
pos_bbox_preds) + pos_decoded_target_preds = distance2bbox(pos_points, + pos_bbox_targets) + # centerness weighted iou loss + loss_reg = ((iou_loss( + pos_decoded_bbox_preds, + pos_decoded_target_preds, + reduction='none') * pos_centerness_targets).sum() / + pos_centerness_targets.sum())[None] + loss_centerness = F.binary_cross_entropy_with_logits( + pos_centerness, pos_centerness_targets, reduction='mean')[None] + else: + loss_reg = pos_bbox_preds.sum()[None] + loss_centerness = pos_centerness.sum()[None] + + return dict( + loss_cls=loss_cls, + loss_reg=loss_reg, + loss_centerness=loss_centerness) + + def get_bboxes(self, + cls_scores, + bbox_preds, + centernesses, + img_metas, + cfg, + rescale=None): + assert len(cls_scores) == len(bbox_preds) + num_levels = len(cls_scores) + + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype, + bbox_preds[0].device) + result_list = [] + for img_id in range(len(img_metas)): + cls_score_list = [ + cls_scores[i][img_id].detach() for i in range(num_levels) + ] + bbox_pred_list = [ + bbox_preds[i][img_id].detach() for i in range(num_levels) + ] + centerness_pred_list = [ + centernesses[i][img_id].detach() for i in range(num_levels) + ] + img_shape = img_metas[img_id]['img_shape'] + scale_factor = img_metas[img_id]['scale_factor'] + det_bboxes = self.get_bboxes_single( + cls_score_list, bbox_pred_list, centerness_pred_list, + mlvl_points, img_shape, scale_factor, cfg, rescale) + result_list.append(det_bboxes) + return result_list + + def get_bboxes_single(self, + cls_scores, + bbox_preds, + centernesses, + mlvl_points, + img_shape, + scale_factor, + cfg, + rescale=False): + assert len(cls_scores) == len(bbox_preds) == len(mlvl_points) + mlvl_bboxes = [] + mlvl_scores = [] + mlvl_centerness = [] + for cls_score, bbox_pred, centerness, points in zip( + cls_scores, bbox_preds, centernesses, mlvl_points): + assert cls_score.size()[-2:] == bbox_pred.size()[-2:] + scores = cls_score.permute(1, 2, 0).reshape( + -1, self.cls_out_channels).sigmoid() + centerness = centerness.permute(1, 2, 0).reshape(-1).sigmoid() + + bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) + nms_pre = cfg.get('nms_pre', -1) + if nms_pre > 0 and scores.shape[0] > nms_pre: + max_scores, _ = (scores * centerness[:, None]).max(dim=1) + _, topk_inds = max_scores.topk(nms_pre) + points = points[topk_inds, :] + bbox_pred = bbox_pred[topk_inds, :] + scores = scores[topk_inds, :] + centerness = centerness[topk_inds] + bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape) + mlvl_bboxes.append(bboxes) + mlvl_scores.append(scores) + mlvl_centerness.append(centerness) + mlvl_bboxes = torch.cat(mlvl_bboxes) + if rescale: + mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) + mlvl_scores = torch.cat(mlvl_scores) + padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) + mlvl_scores = torch.cat([padding, mlvl_scores], dim=1) + mlvl_centerness = torch.cat(mlvl_centerness) + det_bboxes, det_labels = multiclass_nms( + mlvl_bboxes, + mlvl_scores, + cfg.score_thr, + cfg.nms, + cfg.max_per_img, + score_factors=mlvl_centerness) + return det_bboxes, det_labels + + def get_points(self, featmap_sizes, dtype, device): + """Get points according to feature map sizes. + + Args: + featmap_sizes (list[tuple]): Multi-level feature map sizes. + dtype (torch.dtype): Type of points. + device (torch.device): Device of points. + + Returns: + tuple: points of each image. 
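+
+ Note: points are placed at the centers of the feature map cells,
+ i.e. offset by `stride // 2` from the top-left corner of the image.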
+ """ + mlvl_points = [] + for i in range(len(featmap_sizes)): + mlvl_points.append( + self.get_points_single(featmap_sizes[i], self.strides[i], + dtype, device)) + return mlvl_points + + def get_points_single(self, featmap_size, stride, dtype, device): + h, w = featmap_size + x_range = torch.arange( + 0, w * stride, stride, dtype=dtype, device=device) + y_range = torch.arange( + 0, h * stride, stride, dtype=dtype, device=device) + y, x = torch.meshgrid(y_range, x_range) + points = torch.stack( + (x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2 + return points + + def fcos_target(self, points, gt_bboxes_list, gt_labels_list): + assert len(points) == len(self.regress_ranges) + num_levels = len(points) + # expand regress ranges to align with points + expanded_regress_ranges = [ + points[i].new_tensor(self.regress_ranges[i])[None].expand_as( + points[i]) for i in range(num_levels) + ] + # concat all levels points and regress ranges + concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0) + concat_points = torch.cat(points, dim=0) + # get labels and bbox_targets of each image + labels_list, bbox_targets_list = multi_apply( + self.fcos_target_single, + gt_bboxes_list, + gt_labels_list, + points=concat_points, + regress_ranges=concat_regress_ranges) + + # split to per img, per level + num_points = [center.size(0) for center in points] + labels_list = [labels.split(num_points, 0) for labels in labels_list] + bbox_targets_list = [ + bbox_targets.split(num_points, 0) + for bbox_targets in bbox_targets_list + ] + + # concat per level image + concat_lvl_labels = [] + concat_lvl_bbox_targets = [] + for i in range(num_levels): + concat_lvl_labels.append( + torch.cat([labels[i] for labels in labels_list])) + concat_lvl_bbox_targets.append( + torch.cat( + [bbox_targets[i] for bbox_targets in bbox_targets_list])) + return concat_lvl_labels, concat_lvl_bbox_targets + + def fcos_target_single(self, gt_bboxes, gt_labels, points, regress_ranges): + num_points = points.size(0) + num_gts = gt_labels.size(0) + + areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) * ( + gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1) + # TODO: figure out why these two are different + # areas = areas[None].expand(num_points, num_gts) + areas = areas[None].repeat(num_points, 1) + regress_ranges = regress_ranges[:, None, :].expand( + num_points, num_gts, 2) + gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4) + xs, ys = points[:, 0], points[:, 1] + xs = xs[:, None].expand(num_points, num_gts) + ys = ys[:, None].expand(num_points, num_gts) + + left = xs - gt_bboxes[..., 0] + right = gt_bboxes[..., 2] - xs + top = ys - gt_bboxes[..., 1] + bottom = gt_bboxes[..., 3] - ys + bbox_targets = torch.stack((left, top, right, bottom), -1) + + # condition1: inside a gt bbox + inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0 + + # condition2: limit the regression range for each location + max_regress_distance = bbox_targets.max(-1)[0] + inside_regress_range = ( + max_regress_distance >= regress_ranges[..., 0]) & ( + max_regress_distance <= regress_ranges[..., 1]) + + # if there are still more than one objects for a location, + # we choose the one with minimal area + areas[inside_gt_bbox_mask == 0] = INF + areas[inside_regress_range == 0] = INF + min_area, min_area_inds = areas.min(dim=1) + + labels = gt_labels[min_area_inds] + labels[min_area == INF] = 0 + bbox_targets = bbox_targets[range(num_points), min_area_inds] + + return labels, bbox_targets + + def centerness_target(self, pos_bbox_targets): + # only calculate pos centerness 
targets, otherwise there may be nan + left_right = pos_bbox_targets[:, [0, 2]] + top_bottom = pos_bbox_targets[:, [1, 3]] + centerness_targets = ( + left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * ( + top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]) + return torch.sqrt(centerness_targets) diff --git a/mmdet/models/anchor_heads/retina_head.py b/mmdet/models/anchor_heads/retina_head.py new file mode 100644 index 0000000..3eefca4 --- /dev/null +++ b/mmdet/models/anchor_heads/retina_head.py @@ -0,0 +1,88 @@ +import numpy as np +import torch.nn as nn +from mmcv.cnn import normal_init + +from .anchor_head import AnchorHead +from ..registry import HEADS +from ..utils import bias_init_with_prob, ConvModule + + +@HEADS.register_module +class RetinaHead(AnchorHead): + + def __init__(self, + num_classes, + in_channels, + stacked_convs=4, + octave_base_scale=4, + scales_per_octave=3, + conv_cfg=None, + norm_cfg=None, + **kwargs): + self.stacked_convs = stacked_convs + self.octave_base_scale = octave_base_scale + self.scales_per_octave = scales_per_octave + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + octave_scales = np.array( + [2**(i / scales_per_octave) for i in range(scales_per_octave)]) + anchor_scales = octave_scales * octave_base_scale + super(RetinaHead, self).__init__( + num_classes, + in_channels, + anchor_scales=anchor_scales, + use_sigmoid_cls=True, + cls_focal_loss=True, + **kwargs) + + def _init_layers(self): + self.relu = nn.ReLU(inplace=True) + self.cls_convs = nn.ModuleList() + self.reg_convs = nn.ModuleList() + for i in range(self.stacked_convs): + chn = self.in_channels if i == 0 else self.feat_channels + self.cls_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg)) + self.reg_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg)) + self.retina_cls = nn.Conv2d( + self.feat_channels, + self.num_anchors * self.cls_out_channels, + 3, + padding=1) + self.retina_reg = nn.Conv2d( + self.feat_channels, self.num_anchors * 4, 3, padding=1) + + def init_weights(self): + for m in self.cls_convs: + normal_init(m.conv, std=0.01) + for m in self.reg_convs: + normal_init(m.conv, std=0.01) + bias_cls = bias_init_with_prob(0.01) + normal_init(self.retina_cls, std=0.01, bias=bias_cls) + normal_init(self.retina_reg, std=0.01) + + def forward_single(self, x): + cls_feat = x + reg_feat = x + for cls_conv in self.cls_convs: + cls_feat = cls_conv(cls_feat) + for reg_conv in self.reg_convs: + reg_feat = reg_conv(reg_feat) + cls_score = self.retina_cls(cls_feat) + bbox_pred = self.retina_reg(reg_feat) + return cls_score, bbox_pred diff --git a/mmdet/models/anchor_heads/rpn_head.py b/mmdet/models/anchor_heads/rpn_head.py new file mode 100644 index 0000000..fe9d5c3 --- /dev/null +++ b/mmdet/models/anchor_heads/rpn_head.py @@ -0,0 +1,104 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import normal_init + +from mmdet.core import delta2bbox +from mmdet.ops import nms +from .anchor_head import AnchorHead +from ..registry import HEADS + + +@HEADS.register_module +class RPNHead(AnchorHead): + + def __init__(self, in_channels, **kwargs): + super(RPNHead, self).__init__(2, in_channels, **kwargs) + + def _init_layers(self): + self.rpn_conv = nn.Conv2d( + self.in_channels, self.feat_channels, 3, padding=1) + self.rpn_cls = nn.Conv2d(self.feat_channels, + self.num_anchors * 
self.cls_out_channels, 1) + self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 1) + + def init_weights(self): + normal_init(self.rpn_conv, std=0.01) + normal_init(self.rpn_cls, std=0.01) + normal_init(self.rpn_reg, std=0.01) + + def forward_single(self, x): + x = self.rpn_conv(x) + x = F.relu(x, inplace=True) + rpn_cls_score = self.rpn_cls(x) + rpn_bbox_pred = self.rpn_reg(x) + return rpn_cls_score, rpn_bbox_pred + + def loss(self, + cls_scores, + bbox_preds, + gt_bboxes, + img_metas, + cfg, + gt_bboxes_ignore=None): + losses = super(RPNHead, self).loss( + cls_scores, + bbox_preds, + gt_bboxes, + None, + img_metas, + cfg, + gt_bboxes_ignore=gt_bboxes_ignore) + return dict( + loss_rpn_cls=losses['loss_cls'], loss_rpn_reg=losses['loss_reg']) + + def get_bboxes_single(self, + cls_scores, + bbox_preds, + mlvl_anchors, + img_shape, + scale_factor, + cfg, + rescale=False): + mlvl_proposals = [] + for idx in range(len(cls_scores)): + rpn_cls_score = cls_scores[idx] + rpn_bbox_pred = bbox_preds[idx] + assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:] + anchors = mlvl_anchors[idx] + rpn_cls_score = rpn_cls_score.permute(1, 2, 0) + if self.use_sigmoid_cls: + rpn_cls_score = rpn_cls_score.reshape(-1) + scores = rpn_cls_score.sigmoid() + else: + rpn_cls_score = rpn_cls_score.reshape(-1, 2) + scores = rpn_cls_score.softmax(dim=1)[:, 1] + rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4) + if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre: + _, topk_inds = scores.topk(cfg.nms_pre) + rpn_bbox_pred = rpn_bbox_pred[topk_inds, :] + anchors = anchors[topk_inds, :] + scores = scores[topk_inds] + proposals = delta2bbox(anchors, rpn_bbox_pred, self.target_means, + self.target_stds, img_shape) + if cfg.min_bbox_size > 0: + w = proposals[:, 2] - proposals[:, 0] + 1 + h = proposals[:, 3] - proposals[:, 1] + 1 + valid_inds = torch.nonzero((w >= cfg.min_bbox_size) & + (h >= cfg.min_bbox_size)).squeeze() + proposals = proposals[valid_inds, :] + scores = scores[valid_inds] + proposals = torch.cat([proposals, scores.unsqueeze(-1)], dim=-1) + proposals, _ = nms(proposals, cfg.nms_thr) + proposals = proposals[:cfg.nms_post, :] + mlvl_proposals.append(proposals) + proposals = torch.cat(mlvl_proposals, 0) + if cfg.nms_across_levels: + proposals, _ = nms(proposals, cfg.nms_thr) + proposals = proposals[:cfg.max_num, :] + else: + scores = proposals[:, 4] + num = min(cfg.max_num, proposals.shape[0]) + _, topk_inds = scores.topk(num) + proposals = proposals[topk_inds, :] + return proposals diff --git a/mmdet/models/anchor_heads/ssd_head.py b/mmdet/models/anchor_heads/ssd_head.py new file mode 100644 index 0000000..9c8b2a1 --- /dev/null +++ b/mmdet/models/anchor_heads/ssd_head.py @@ -0,0 +1,191 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import xavier_init + +from mmdet.core import (AnchorGenerator, anchor_target, weighted_smoothl1, + multi_apply) +from .anchor_head import AnchorHead +from ..registry import HEADS + + +@HEADS.register_module +class SSDHead(AnchorHead): + + def __init__(self, + input_size=300, + num_classes=81, + in_channels=(512, 1024, 512, 256, 256, 256), + anchor_strides=(8, 16, 32, 64, 100, 300), + basesize_ratio_range=(0.1, 0.9), + anchor_ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]), + target_means=(.0, .0, .0, .0), + target_stds=(1.0, 1.0, 1.0, 1.0)): + super(AnchorHead, self).__init__() + self.input_size = input_size + self.num_classes = num_classes + self.in_channels = in_channels + 
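+        # Classification scores cover all num_classes categories (index 0 is
+        # background) with a softmax, matching the F.cross_entropy used in
+        # loss_single below.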
self.cls_out_channels = num_classes + num_anchors = [len(ratios) * 2 + 2 for ratios in anchor_ratios] + reg_convs = [] + cls_convs = [] + for i in range(len(in_channels)): + reg_convs.append( + nn.Conv2d( + in_channels[i], + num_anchors[i] * 4, + kernel_size=3, + padding=1)) + cls_convs.append( + nn.Conv2d( + in_channels[i], + num_anchors[i] * num_classes, + kernel_size=3, + padding=1)) + self.reg_convs = nn.ModuleList(reg_convs) + self.cls_convs = nn.ModuleList(cls_convs) + + min_ratio, max_ratio = basesize_ratio_range + min_ratio = int(min_ratio * 100) + max_ratio = int(max_ratio * 100) + step = int(np.floor(max_ratio - min_ratio) / (len(in_channels) - 2)) + min_sizes = [] + max_sizes = [] + for r in range(int(min_ratio), int(max_ratio) + 1, step): + min_sizes.append(int(input_size * r / 100)) + max_sizes.append(int(input_size * (r + step) / 100)) + if input_size == 300: + if basesize_ratio_range[0] == 0.15: # SSD300 COCO + min_sizes.insert(0, int(input_size * 7 / 100)) + max_sizes.insert(0, int(input_size * 15 / 100)) + elif basesize_ratio_range[0] == 0.2: # SSD300 VOC + min_sizes.insert(0, int(input_size * 10 / 100)) + max_sizes.insert(0, int(input_size * 20 / 100)) + elif input_size == 512: + if basesize_ratio_range[0] == 0.1: # SSD512 COCO + min_sizes.insert(0, int(input_size * 4 / 100)) + max_sizes.insert(0, int(input_size * 10 / 100)) + elif basesize_ratio_range[0] == 0.15: # SSD512 VOC + min_sizes.insert(0, int(input_size * 7 / 100)) + max_sizes.insert(0, int(input_size * 15 / 100)) + self.anchor_generators = [] + self.anchor_strides = anchor_strides + for k in range(len(anchor_strides)): + base_size = min_sizes[k] + stride = anchor_strides[k] + ctr = ((stride - 1) / 2., (stride - 1) / 2.) + scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])] + ratios = [1.] 
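+            # e.g. anchor_ratios[k] = [2, 3] expands the ratio list below to
+            # [1., 1/2, 2, 1/3, 3]; the index_select that follows then keeps the
+            # 6 base anchors counted by num_anchors[k] = 2 * len([2, 3]) + 2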
+ for r in anchor_ratios[k]: + ratios += [1 / r, r] # 4 or 6 ratio + anchor_generator = AnchorGenerator( + base_size, scales, ratios, scale_major=False, ctr=ctr) + indices = list(range(len(ratios))) + indices.insert(1, len(indices)) + anchor_generator.base_anchors = torch.index_select( + anchor_generator.base_anchors, 0, torch.LongTensor(indices)) + self.anchor_generators.append(anchor_generator) + + self.target_means = target_means + self.target_stds = target_stds + self.use_sigmoid_cls = False + self.cls_focal_loss = False + + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + xavier_init(m, distribution='uniform', bias=0) + + def forward(self, feats): + cls_scores = [] + bbox_preds = [] + for feat, reg_conv, cls_conv in zip(feats, self.reg_convs, + self.cls_convs): + cls_scores.append(cls_conv(feat)) + bbox_preds.append(reg_conv(feat)) + return cls_scores, bbox_preds + + def loss_single(self, cls_score, bbox_pred, labels, label_weights, + bbox_targets, bbox_weights, num_total_samples, cfg): + loss_cls_all = F.cross_entropy( + cls_score, labels, reduction='none') * label_weights + pos_inds = (labels > 0).nonzero().view(-1) + neg_inds = (labels == 0).nonzero().view(-1) + + num_pos_samples = pos_inds.size(0) + num_neg_samples = cfg.neg_pos_ratio * num_pos_samples + if num_neg_samples > neg_inds.size(0): + num_neg_samples = neg_inds.size(0) + topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples) + loss_cls_pos = loss_cls_all[pos_inds].sum() + loss_cls_neg = topk_loss_cls_neg.sum() + loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples + + loss_reg = weighted_smoothl1( + bbox_pred, + bbox_targets, + bbox_weights, + beta=cfg.smoothl1_beta, + avg_factor=num_total_samples) + return loss_cls[None], loss_reg + + def loss(self, + cls_scores, + bbox_preds, + gt_bboxes, + gt_labels, + img_metas, + cfg, + gt_bboxes_ignore=None): + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len(featmap_sizes) == len(self.anchor_generators) + + anchor_list, valid_flag_list = self.get_anchors( + featmap_sizes, img_metas) + cls_reg_targets = anchor_target( + anchor_list, + valid_flag_list, + gt_bboxes, + img_metas, + self.target_means, + self.target_stds, + cfg, + gt_bboxes_ignore_list=gt_bboxes_ignore, + gt_labels_list=gt_labels, + label_channels=1, + sampling=False, + unmap_outputs=False) + if cls_reg_targets is None: + return None + (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, + num_total_pos, num_total_neg) = cls_reg_targets + + num_images = len(img_metas) + all_cls_scores = torch.cat([ + s.permute(0, 2, 3, 1).reshape( + num_images, -1, self.cls_out_channels) for s in cls_scores + ], 1) + all_labels = torch.cat(labels_list, -1).view(num_images, -1) + all_label_weights = torch.cat(label_weights_list, -1).view( + num_images, -1) + all_bbox_preds = torch.cat([ + b.permute(0, 2, 3, 1).reshape(num_images, -1, 4) + for b in bbox_preds + ], -2) + all_bbox_targets = torch.cat(bbox_targets_list, -2).view( + num_images, -1, 4) + all_bbox_weights = torch.cat(bbox_weights_list, -2).view( + num_images, -1, 4) + + losses_cls, losses_reg = multi_apply( + self.loss_single, + all_cls_scores, + all_bbox_preds, + all_labels, + all_label_weights, + all_bbox_targets, + all_bbox_weights, + num_total_samples=num_total_pos, + cfg=cfg) + return dict(loss_cls=losses_cls, loss_reg=losses_reg) diff --git a/mmdet/models/backbones/__init__.py b/mmdet/models/backbones/__init__.py new file mode 100644 index 0000000..c91a92e --- /dev/null 
+++ b/mmdet/models/backbones/__init__.py @@ -0,0 +1,5 @@ +from .resnet import ResNet, make_res_layer +from .resnext import ResNeXt +from .ssd_vgg import SSDVGG + +__all__ = ['ResNet', 'make_res_layer', 'ResNeXt', 'SSDVGG'] diff --git a/mmdet/models/backbones/resnet.py b/mmdet/models/backbones/resnet.py new file mode 100644 index 0000000..d1db1dd --- /dev/null +++ b/mmdet/models/backbones/resnet.py @@ -0,0 +1,474 @@ +import logging + +import torch.nn as nn +import torch.utils.checkpoint as cp +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcv.cnn import constant_init, kaiming_init +from mmcv.runner import load_checkpoint + +from mmdet.ops import DeformConv, ModulatedDeformConv +from ..registry import BACKBONES +from ..utils import build_conv_layer, build_norm_layer + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None): + super(BasicBlock, self).__init__() + assert dcn is None, "Not implemented yet." + + self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) + self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) + + self.conv1 = build_conv_layer( + conv_cfg, + inplanes, + planes, + 3, + stride=stride, + padding=dilation, + dilation=dilation, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + conv_cfg, planes, planes, 3, padding=1, bias=False) + self.add_module(self.norm2_name, norm2) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + assert not with_cp + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + @property + def norm2(self): + return getattr(self, self.norm2_name) + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None): + """Bottleneck block for ResNet. + If style is "pytorch", the stride-two layer is the 3x3 conv layer, + if it is "caffe", the stride-two layer is the first 1x1 conv layer. 
+ """ + super(Bottleneck, self).__init__() + assert style in ['pytorch', 'caffe'] + assert dcn is None or isinstance(dcn, dict) + self.inplanes = inplanes + self.planes = planes + self.stride = stride + self.dilation = dilation + self.style = style + self.with_cp = with_cp + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.dcn = dcn + self.with_dcn = dcn is not None + if self.style == 'pytorch': + self.conv1_stride = 1 + self.conv2_stride = stride + else: + self.conv1_stride = stride + self.conv2_stride = 1 + + self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) + self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + norm_cfg, planes * self.expansion, postfix=3) + + self.conv1 = build_conv_layer( + conv_cfg, + inplanes, + planes, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + fallback_on_stride = False + self.with_modulated_dcn = False + if self.with_dcn: + fallback_on_stride = dcn.get('fallback_on_stride', False) + self.with_modulated_dcn = dcn.get('modulated', False) + if not self.with_dcn or fallback_on_stride: + self.conv2 = build_conv_layer( + conv_cfg, + planes, + planes, + kernel_size=3, + stride=self.conv2_stride, + padding=dilation, + dilation=dilation, + bias=False) + else: + assert conv_cfg is None, 'conv_cfg must be None for DCN' + deformable_groups = dcn.get('deformable_groups', 1) + if not self.with_modulated_dcn: + conv_op = DeformConv + offset_channels = 18 + else: + conv_op = ModulatedDeformConv + offset_channels = 27 + self.conv2_offset = nn.Conv2d( + planes, + deformable_groups * offset_channels, + kernel_size=3, + stride=self.conv2_stride, + padding=dilation, + dilation=dilation) + self.conv2 = conv_op( + planes, + planes, + kernel_size=3, + stride=self.conv2_stride, + padding=dilation, + dilation=dilation, + deformable_groups=deformable_groups, + bias=False) + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + conv_cfg, + planes, + planes * self.expansion, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + @property + def norm2(self): + return getattr(self, self.norm2_name) + + @property + def norm3(self): + return getattr(self, self.norm3_name) + + def forward(self, x): + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + if not self.with_dcn: + out = self.conv2(out) + elif self.with_modulated_dcn: + offset_mask = self.conv2_offset(out) + offset = offset_mask[:, :18, :, :] + mask = offset_mask[:, -9:, :, :].sigmoid() + out = self.conv2(out, offset, mask) + else: + offset = self.conv2_offset(out) + out = self.conv2(out, offset) + out = self.norm2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.norm3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +def make_res_layer(block, + inplanes, + planes, + blocks, + stride=1, + dilation=1, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None): + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = nn.Sequential( + 
build_conv_layer( + conv_cfg, + inplanes, + planes * block.expansion, + kernel_size=1, + stride=stride, + bias=False), + build_norm_layer(norm_cfg, planes * block.expansion)[1], + ) + + layers = [] + layers.append( + block( + inplanes, + planes, + stride, + dilation, + downsample, + style=style, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn)) + inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append( + block( + inplanes, + planes, + 1, + dilation, + style=style, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn)) + + return nn.Sequential(*layers) + + +@BACKBONES.register_module +class ResNet(nn.Module): + """ResNet backbone. + + Args: + depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. + num_stages (int): Resnet stages, normally 4. + strides (Sequence[int]): Strides of the first block of each stage. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int]): Output from which stages. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. + norm_cfg (dict): dictionary to construct and config norm layer. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + zero_init_residual (bool): whether to use zero init for last norm layer + in resblocks to let them behave as identity. + """ + + arch_settings = { + 18: (BasicBlock, (2, 2, 2, 2)), + 34: (BasicBlock, (3, 4, 6, 3)), + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, + depth, + num_stages=4, + strides=(1, 2, 2, 2), + dilations=(1, 1, 1, 1), + out_indices=(0, 1, 2, 3), + style='pytorch', + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + dcn=None, + stage_with_dcn=(False, False, False, False), + with_cp=False, + zero_init_residual=True): + super(ResNet, self).__init__() + if depth not in self.arch_settings: + raise KeyError('invalid depth {} for resnet'.format(depth)) + self.depth = depth + self.num_stages = num_stages + assert num_stages >= 1 and num_stages <= 4 + self.strides = strides + self.dilations = dilations + assert len(strides) == len(dilations) == num_stages + self.out_indices = out_indices + assert max(out_indices) < num_stages + self.style = style + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.with_cp = with_cp + self.norm_eval = norm_eval + self.dcn = dcn + self.stage_with_dcn = stage_with_dcn + if dcn is not None: + assert len(stage_with_dcn) == num_stages + self.zero_init_residual = zero_init_residual + self.block, stage_blocks = self.arch_settings[depth] + self.stage_blocks = stage_blocks[:num_stages] + self.inplanes = 64 + + self._make_stem_layer() + + self.res_layers = [] + for i, num_blocks in enumerate(self.stage_blocks): + stride = strides[i] + dilation = dilations[i] + dcn = self.dcn if self.stage_with_dcn[i] else None + planes = 64 * 2**i + res_layer = make_res_layer( + self.block, + self.inplanes, + planes, + num_blocks, + stride=stride, + dilation=dilation, + style=self.style, + 
with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn) + self.inplanes = planes * self.block.expansion + layer_name = 'layer{}'.format(i + 1) + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self._freeze_stages() + + self.feat_dim = self.block.expansion * 64 * 2**( + len(self.stage_blocks) - 1) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + def _make_stem_layer(self): + self.conv1 = build_conv_layer( + self.conv_cfg, + 3, + 64, + kernel_size=7, + stride=2, + padding=3, + bias=False) + self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.norm1.eval() + for m in [self.conv1, self.norm1]: + for param in m.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + m = getattr(self, 'layer{}'.format(i)) + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def init_weights(self, pretrained=None): + if isinstance(pretrained, str): + logger = logging.getLogger() + load_checkpoint(self, pretrained, strict=False, logger=logger) + elif pretrained is None: + for m in self.modules(): + if isinstance(m, nn.Conv2d): + kaiming_init(m) + elif isinstance(m, (_BatchNorm, nn.GroupNorm)): + constant_init(m, 1) + + if self.dcn is not None: + for m in self.modules(): + if isinstance(m, Bottleneck) and hasattr( + m, 'conv2_offset'): + constant_init(m.conv2_offset, 0) + + if self.zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + constant_init(m.norm3, 0) + elif isinstance(m, BasicBlock): + constant_init(m.norm2, 0) + else: + raise TypeError('pretrained must be a str or None') + + def forward(self, x): + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + x = self.maxpool(x) + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + return tuple(outs) + + def train(self, mode=True): + super(ResNet, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmdet/models/backbones/resnext.py b/mmdet/models/backbones/resnext.py new file mode 100644 index 0000000..c869a02 --- /dev/null +++ b/mmdet/models/backbones/resnext.py @@ -0,0 +1,218 @@ +import math + +import torch.nn as nn + +from mmdet.ops import DeformConv, ModulatedDeformConv +from .resnet import Bottleneck as _Bottleneck +from .resnet import ResNet +from ..registry import BACKBONES +from ..utils import build_conv_layer, build_norm_layer + + +class Bottleneck(_Bottleneck): + + def __init__(self, *args, groups=1, base_width=4, **kwargs): + """Bottleneck block for ResNeXt. + If style is "pytorch", the stride-two layer is the 3x3 conv layer, + if it is "caffe", the stride-two layer is the first 1x1 conv layer. 
+ """ + super(Bottleneck, self).__init__(*args, **kwargs) + + if groups == 1: + width = self.planes + else: + width = math.floor(self.planes * (base_width / 64)) * groups + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, width, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + self.norm_cfg, width, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.planes * self.expansion, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.inplanes, + width, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + fallback_on_stride = False + self.with_modulated_dcn = False + if self.with_dcn: + fallback_on_stride = self.dcn.get('fallback_on_stride', False) + self.with_modulated_dcn = self.dcn.get('modulated', False) + if not self.with_dcn or fallback_on_stride: + self.conv2 = build_conv_layer( + self.conv_cfg, + width, + width, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + bias=False) + else: + assert self.conv_cfg is None, 'conv_cfg must be None for DCN' + groups = self.dcn.get('groups', 1) + deformable_groups = self.dcn.get('deformable_groups', 1) + if not self.with_modulated_dcn: + conv_op = DeformConv + offset_channels = 18 + else: + conv_op = ModulatedDeformConv + offset_channels = 27 + self.conv2_offset = nn.Conv2d( + width, + deformable_groups * offset_channels, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation) + self.conv2 = conv_op( + width, + width, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + deformable_groups=deformable_groups, + bias=False) + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + self.conv_cfg, + width, + self.planes * self.expansion, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + +def make_res_layer(block, + inplanes, + planes, + blocks, + stride=1, + dilation=1, + groups=1, + base_width=4, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None): + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = nn.Sequential( + build_conv_layer( + conv_cfg, + inplanes, + planes * block.expansion, + kernel_size=1, + stride=stride, + bias=False), + build_norm_layer(norm_cfg, planes * block.expansion)[1], + ) + + layers = [] + layers.append( + block( + inplanes, + planes, + stride=stride, + dilation=dilation, + downsample=downsample, + groups=groups, + base_width=base_width, + style=style, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn)) + inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append( + block( + inplanes, + planes, + stride=1, + dilation=dilation, + groups=groups, + base_width=base_width, + style=style, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn)) + + return nn.Sequential(*layers) + + +@BACKBONES.register_module +class ResNeXt(ResNet): + """ResNeXt backbone. + + Args: + depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. + num_stages (int): Resnet stages, normally 4. + groups (int): Group of resnext. + base_width (int): Base width of resnext. + strides (Sequence[int]): Strides of the first block of each stage. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int]): Output from which stages. + style (str): `pytorch` or `caffe`. 
If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + frozen_stages (int): Stages to be frozen (all param fixed). -1 means + not freezing any parameters. + norm_cfg (dict): dictionary to construct and config norm layer. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + zero_init_residual (bool): whether to use zero init for last norm layer + in resblocks to let them behave as identity. + """ + + arch_settings = { + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, groups=1, base_width=4, **kwargs): + super(ResNeXt, self).__init__(**kwargs) + self.groups = groups + self.base_width = base_width + + self.inplanes = 64 + self.res_layers = [] + for i, num_blocks in enumerate(self.stage_blocks): + stride = self.strides[i] + dilation = self.dilations[i] + dcn = self.dcn if self.stage_with_dcn[i] else None + planes = 64 * 2**i + res_layer = make_res_layer( + self.block, + self.inplanes, + planes, + num_blocks, + stride=stride, + dilation=dilation, + groups=self.groups, + base_width=self.base_width, + style=self.style, + with_cp=self.with_cp, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + dcn=dcn) + self.inplanes = planes * self.block.expansion + layer_name = 'layer{}'.format(i + 1) + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self._freeze_stages() diff --git a/mmdet/models/backbones/ssd_vgg.py b/mmdet/models/backbones/ssd_vgg.py new file mode 100644 index 0000000..ffce9a9 --- /dev/null +++ b/mmdet/models/backbones/ssd_vgg.py @@ -0,0 +1,130 @@ +import logging + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import (VGG, xavier_init, constant_init, kaiming_init, + normal_init) +from mmcv.runner import load_checkpoint +from ..registry import BACKBONES + + +@BACKBONES.register_module +class SSDVGG(VGG): + extra_setting = { + 300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256), + 512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128), + } + + def __init__(self, + input_size, + depth, + with_last_pool=False, + ceil_mode=True, + out_indices=(3, 4), + out_feature_indices=(22, 34), + l2_norm_scale=20.): + super(SSDVGG, self).__init__( + depth, + with_last_pool=with_last_pool, + ceil_mode=ceil_mode, + out_indices=out_indices) + assert input_size in (300, 512) + self.input_size = input_size + + self.features.add_module( + str(len(self.features)), + nn.MaxPool2d(kernel_size=3, stride=1, padding=1)) + self.features.add_module( + str(len(self.features)), + nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)) + self.features.add_module( + str(len(self.features)), nn.ReLU(inplace=True)) + self.features.add_module( + str(len(self.features)), nn.Conv2d(1024, 1024, kernel_size=1)) + self.features.add_module( + str(len(self.features)), nn.ReLU(inplace=True)) + self.out_feature_indices = out_feature_indices + + self.inplanes = 1024 + self.extra = self._make_extra_layers(self.extra_setting[input_size]) + self.l2_norm = L2Norm( + self.features[out_feature_indices[0] - 1].out_channels, + l2_norm_scale) + + def init_weights(self, pretrained=None): + if isinstance(pretrained, str): + logger = logging.getLogger() + 
load_checkpoint(self, pretrained, strict=False, logger=logger) + elif pretrained is None: + for m in self.features.modules(): + if isinstance(m, nn.Conv2d): + kaiming_init(m) + elif isinstance(m, nn.BatchNorm2d): + constant_init(m, 1) + elif isinstance(m, nn.Linear): + normal_init(m, std=0.01) + else: + raise TypeError('pretrained must be a str or None') + + for m in self.extra.modules(): + if isinstance(m, nn.Conv2d): + xavier_init(m, distribution='uniform') + + constant_init(self.l2_norm, self.l2_norm.scale) + + def forward(self, x): + outs = [] + for i, layer in enumerate(self.features): + x = layer(x) + if i in self.out_feature_indices: + outs.append(x) + for i, layer in enumerate(self.extra): + x = F.relu(layer(x), inplace=True) + if i % 2 == 1: + outs.append(x) + outs[0] = self.l2_norm(outs[0]) + if len(outs) == 1: + return outs[0] + else: + return tuple(outs) + + def _make_extra_layers(self, outplanes): + layers = [] + kernel_sizes = (1, 3) + num_layers = 0 + outplane = None + for i in range(len(outplanes)): + if self.inplanes == 'S': + self.inplanes = outplane + continue + k = kernel_sizes[num_layers % 2] + if outplanes[i] == 'S': + outplane = outplanes[i + 1] + conv = nn.Conv2d( + self.inplanes, outplane, k, stride=2, padding=1) + else: + outplane = outplanes[i] + conv = nn.Conv2d( + self.inplanes, outplane, k, stride=1, padding=0) + layers.append(conv) + self.inplanes = outplanes[i] + num_layers += 1 + if self.input_size == 512: + layers.append(nn.Conv2d(self.inplanes, 256, 4, padding=1)) + + return nn.Sequential(*layers) + + +class L2Norm(nn.Module): + + def __init__(self, n_dims, scale=20., eps=1e-10): + super(L2Norm, self).__init__() + self.n_dims = n_dims + self.weight = nn.Parameter(torch.Tensor(self.n_dims)) + self.eps = eps + self.scale = scale + + def forward(self, x): + norm = x.pow(2).sum(1, keepdim=True).sqrt() + self.eps + return self.weight[None, :, None, None].expand_as(x) * x / norm diff --git a/mmdet/models/bbox_heads/__init__.py b/mmdet/models/bbox_heads/__init__.py new file mode 100644 index 0000000..dac10bd --- /dev/null +++ b/mmdet/models/bbox_heads/__init__.py @@ -0,0 +1,4 @@ +from .bbox_head import BBoxHead +from .convfc_bbox_head import ConvFCBBoxHead, SharedFCBBoxHead + +__all__ = ['BBoxHead', 'ConvFCBBoxHead', 'SharedFCBBoxHead'] diff --git a/mmdet/models/bbox_heads/bbox_head.py b/mmdet/models/bbox_heads/bbox_head.py new file mode 100644 index 0000000..4dcbd97 --- /dev/null +++ b/mmdet/models/bbox_heads/bbox_head.py @@ -0,0 +1,208 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from mmdet.core import (delta2bbox, multiclass_nms, bbox_target, + weighted_cross_entropy, weighted_smoothl1, accuracy) +from ..registry import HEADS + + +@HEADS.register_module +class BBoxHead(nn.Module): + """Simplest RoI head, with only two fc layers for classification and + regression respectively""" + + def __init__(self, + with_avg_pool=False, + with_cls=True, + with_reg=True, + roi_feat_size=7, + in_channels=256, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False): + super(BBoxHead, self).__init__() + assert with_cls or with_reg + self.with_avg_pool = with_avg_pool + self.with_cls = with_cls + self.with_reg = with_reg + self.roi_feat_size = roi_feat_size + self.in_channels = in_channels + self.num_classes = num_classes + self.target_means = target_means + self.target_stds = target_stds + self.reg_class_agnostic = reg_class_agnostic + + in_channels = self.in_channels + if 
self.with_avg_pool: + self.avg_pool = nn.AvgPool2d(roi_feat_size) + else: + in_channels *= (self.roi_feat_size * self.roi_feat_size) + if self.with_cls: + self.fc_cls = nn.Linear(in_channels, num_classes) + if self.with_reg: + out_dim_reg = 4 if reg_class_agnostic else 4 * num_classes + self.fc_reg = nn.Linear(in_channels, out_dim_reg) + self.debug_imgs = None + + def init_weights(self): + if self.with_cls: + nn.init.normal_(self.fc_cls.weight, 0, 0.01) + nn.init.constant_(self.fc_cls.bias, 0) + if self.with_reg: + nn.init.normal_(self.fc_reg.weight, 0, 0.001) + nn.init.constant_(self.fc_reg.bias, 0) + + def forward(self, x): + if self.with_avg_pool: + x = self.avg_pool(x) + x = x.view(x.size(0), -1) + cls_score = self.fc_cls(x) if self.with_cls else None + bbox_pred = self.fc_reg(x) if self.with_reg else None + return cls_score, bbox_pred + + def get_target(self, sampling_results, gt_bboxes, gt_labels, + rcnn_train_cfg): + pos_proposals = [res.pos_bboxes for res in sampling_results] + neg_proposals = [res.neg_bboxes for res in sampling_results] + pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results] + pos_gt_labels = [res.pos_gt_labels for res in sampling_results] + reg_classes = 1 if self.reg_class_agnostic else self.num_classes + cls_reg_targets = bbox_target( + pos_proposals, + neg_proposals, + pos_gt_bboxes, + pos_gt_labels, + rcnn_train_cfg, + reg_classes, + target_means=self.target_means, + target_stds=self.target_stds) + return cls_reg_targets + + def loss(self, + cls_score, + bbox_pred, + labels, + label_weights, + bbox_targets, + bbox_weights, + reduce=True): + losses = dict() + if cls_score is not None: + losses['loss_cls'] = weighted_cross_entropy( + cls_score, labels, label_weights, reduce=reduce) + losses['acc'] = accuracy(cls_score, labels) + if bbox_pred is not None: + pos_inds = labels > 0 + if self.reg_class_agnostic: + pos_bbox_pred = bbox_pred.view(bbox_pred.size(0), 4)[pos_inds] + else: + pos_bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, + 4)[pos_inds, labels[pos_inds]] + losses['loss_reg'] = weighted_smoothl1( + pos_bbox_pred, + bbox_targets[pos_inds], + bbox_weights[pos_inds], + avg_factor=bbox_targets.size(0)) + return losses + + def get_det_bboxes(self, + rois, + cls_score, + bbox_pred, + img_shape, + scale_factor, + rescale=False, + cfg=None): + if isinstance(cls_score, list): + cls_score = sum(cls_score) / float(len(cls_score)) + scores = F.softmax(cls_score, dim=1) if cls_score is not None else None + + if bbox_pred is not None: + bboxes = delta2bbox(rois[:, 1:], bbox_pred, self.target_means, + self.target_stds, img_shape) + else: + bboxes = rois[:, 1:] + # TODO: add clip here + + if rescale: + bboxes /= scale_factor + + if cfg is None: + return bboxes, scores + else: + det_bboxes, det_labels = multiclass_nms( + bboxes, scores, cfg.score_thr, cfg.nms, cfg.max_per_img) + + return det_bboxes, det_labels + + def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas): + """Refine bboxes during training. + + Args: + rois (Tensor): Shape (n*bs, 5), where n is image number per GPU, + and bs is the sampled RoIs per image. + labels (Tensor): Shape (n*bs, ). + bbox_preds (Tensor): Shape (n*bs, 4) or (n*bs, 4*#class). + pos_is_gts (list[Tensor]): Flags indicating if each positive bbox + is a gt bbox. + img_metas (list[dict]): Meta info of each image. + + Returns: + list[Tensor]: Refined bboxes of each image in a mini-batch. 
+ """ + img_ids = rois[:, 0].long().unique(sorted=True) + assert img_ids.numel() == len(img_metas) + + bboxes_list = [] + for i in range(len(img_metas)): + inds = torch.nonzero(rois[:, 0] == i).squeeze() + num_rois = inds.numel() + + bboxes_ = rois[inds, 1:] + label_ = labels[inds] + bbox_pred_ = bbox_preds[inds] + img_meta_ = img_metas[i] + pos_is_gts_ = pos_is_gts[i] + + bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_, + img_meta_) + # filter gt bboxes + pos_keep = 1 - pos_is_gts_ + keep_inds = pos_is_gts_.new_ones(num_rois) + keep_inds[:len(pos_is_gts_)] = pos_keep + + bboxes_list.append(bboxes[keep_inds]) + + return bboxes_list + + def regress_by_class(self, rois, label, bbox_pred, img_meta): + """Regress the bbox for the predicted class. Used in Cascade R-CNN. + + Args: + rois (Tensor): shape (n, 4) or (n, 5) + label (Tensor): shape (n, ) + bbox_pred (Tensor): shape (n, 4*(#class+1)) or (n, 4) + img_meta (dict): Image meta info. + + Returns: + Tensor: Regressed bboxes, the same shape as input rois. + """ + assert rois.size(1) == 4 or rois.size(1) == 5 + + if not self.reg_class_agnostic: + label = label * 4 + inds = torch.stack((label, label + 1, label + 2, label + 3), 1) + bbox_pred = torch.gather(bbox_pred, 1, inds) + assert bbox_pred.size(1) == 4 + + if rois.size(1) == 4: + new_rois = delta2bbox(rois, bbox_pred, self.target_means, + self.target_stds, img_meta['img_shape']) + else: + bboxes = delta2bbox(rois[:, 1:], bbox_pred, self.target_means, + self.target_stds, img_meta['img_shape']) + new_rois = torch.cat((rois[:, [0]], bboxes), dim=1) + + return new_rois diff --git a/mmdet/models/bbox_heads/convfc_bbox_head.py b/mmdet/models/bbox_heads/convfc_bbox_head.py new file mode 100644 index 0000000..bbe0c6b --- /dev/null +++ b/mmdet/models/bbox_heads/convfc_bbox_head.py @@ -0,0 +1,246 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .bbox_head import BBoxHead +from ..registry import HEADS +from ..utils import ConvModule + +from mmdet.core import (delta2bbox, multiclass_nms, bbox_target, + weighted_cross_entropy, weighted_smoothl1, accuracy) +from ..registry import HEADS + +@HEADS.register_module +class ConvFCBBoxHead(BBoxHead): + """More general bbox head, with shared conv and fc layers and two optional + separated branches. 
+ + /-> cls convs -> cls fcs -> cls + shared convs -> shared fcs + \-> reg convs -> reg fcs -> reg + """ # noqa: W605 + + def __init__(self, + num_shared_convs=0, + num_shared_fcs=0, + num_cls_convs=0, + num_cls_fcs=0, + num_reg_convs=0, + num_reg_fcs=0, + conv_out_channels=256, + fc_out_channels=1024, + conv_cfg=None, + norm_cfg=None, + *args, + **kwargs): + super(ConvFCBBoxHead, self).__init__(*args, **kwargs) + assert (num_shared_convs + num_shared_fcs + num_cls_convs + num_cls_fcs + + num_reg_convs + num_reg_fcs > 0) + if num_cls_convs > 0 or num_reg_convs > 0: + assert num_shared_fcs == 0 + if not self.with_cls: + assert num_cls_convs == 0 and num_cls_fcs == 0 + if not self.with_reg: + assert num_reg_convs == 0 and num_reg_fcs == 0 + self.num_shared_convs = num_shared_convs + self.num_shared_fcs = num_shared_fcs + self.num_cls_convs = num_cls_convs + self.num_cls_fcs = num_cls_fcs + self.num_reg_convs = num_reg_convs + self.num_reg_fcs = num_reg_fcs + self.conv_out_channels = conv_out_channels + self.fc_out_channels = fc_out_channels + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + + # add shared convs and fcs + self.shared_convs, self.shared_fcs, last_layer_dim = \ + self._add_conv_fc_branch( + self.num_shared_convs, self.num_shared_fcs, self.in_channels, + True) + self.shared_out_channels = last_layer_dim + + # add cls specific branch + self.cls_convs, self.cls_fcs, self.cls_last_dim = \ + self._add_conv_fc_branch( + self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels) + + # add reg specific branch + self.reg_convs, self.reg_fcs, self.reg_last_dim = \ + self._add_conv_fc_branch( + self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels) + + if self.num_shared_fcs == 0 and not self.with_avg_pool: + if self.num_cls_fcs == 0: + self.cls_last_dim *= (self.roi_feat_size * self.roi_feat_size) + if self.num_reg_fcs == 0: + self.reg_last_dim *= (self.roi_feat_size * self.roi_feat_size) + + self.relu = nn.ReLU(inplace=True) + # reconstruct fc_cls and fc_reg since input channels are changed + if self.with_cls: + self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes) + if self.with_reg: + out_dim_reg = (4 if self.reg_class_agnostic else + 4 * self.num_classes) + self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg) + + def _add_conv_fc_branch(self, + num_branch_convs, + num_branch_fcs, + in_channels, + is_shared=False): + """Add shared or separable branch + + convs -> avg pool (optional) -> fcs + """ + last_layer_dim = in_channels + # add branch specific conv layers + branch_convs = nn.ModuleList() + if num_branch_convs > 0: + for i in range(num_branch_convs): + conv_in_channels = ( + last_layer_dim if i == 0 else self.conv_out_channels) + branch_convs.append( + ConvModule( + conv_in_channels, + self.conv_out_channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg)) + last_layer_dim = self.conv_out_channels + # add branch specific fc layers + branch_fcs = nn.ModuleList() + if num_branch_fcs > 0: + # for shared branch, only consider self.with_avg_pool + # for separated branches, also consider self.num_shared_fcs + if (is_shared + or self.num_shared_fcs == 0) and not self.with_avg_pool: + last_layer_dim *= (self.roi_feat_size * self.roi_feat_size) + for i in range(num_branch_fcs): + fc_in_channels = ( + last_layer_dim if i == 0 else self.fc_out_channels) + branch_fcs.append( + nn.Linear(fc_in_channels, self.fc_out_channels)) + last_layer_dim = self.fc_out_channels + return branch_convs, branch_fcs, last_layer_dim + + def init_weights(self): 
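+        # BBoxHead.init_weights (called below) gives fc_cls/fc_reg their normal
+        # init; the branch fc layers added here get Xavier init instead.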
+ super(ConvFCBBoxHead, self).init_weights() + for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]: + for m in module_list.modules(): + if isinstance(m, nn.Linear): + nn.init.xavier_uniform_(m.weight) + nn.init.constant_(m.bias, 0) + + def forward(self, x): + # shared part + if self.num_shared_convs > 0: + for conv in self.shared_convs: + x = conv(x) + + if self.num_shared_fcs > 0: + if self.with_avg_pool: + x = self.avg_pool(x) + x = x.view(x.size(0), -1) + for fc in self.shared_fcs: + x = self.relu(fc(x)) + # separate branches + x_cls = x + x_reg = x + + for conv in self.cls_convs: + x_cls = conv(x_cls) + if x_cls.dim() > 2: + if self.with_avg_pool: + x_cls = self.avg_pool(x_cls) + x_cls = x_cls.view(x_cls.size(0), -1) + for fc in self.cls_fcs: + x_cls = self.relu(fc(x_cls)) + + for conv in self.reg_convs: + x_reg = conv(x_reg) + if x_reg.dim() > 2: + if self.with_avg_pool: + x_reg = self.avg_pool(x_reg) + x_reg = x_reg.view(x_reg.size(0), -1) + for fc in self.reg_fcs: + x_reg = self.relu(fc(x_reg)) + + cls_score = self.fc_cls(x_cls) if self.with_cls else None + bbox_pred = self.fc_reg(x_reg) if self.with_reg else None + return cls_score, bbox_pred + + +@HEADS.register_module +class SharedFCBBoxHead(ConvFCBBoxHead): + + def __init__(self, num_fcs=2, fc_out_channels=1024, *args, **kwargs): + assert num_fcs >= 1 + super(SharedFCBBoxHead, self).__init__( + num_shared_convs=0, + num_shared_fcs=num_fcs, + num_cls_convs=0, + num_cls_fcs=0, + num_reg_convs=0, + num_reg_fcs=0, + fc_out_channels=fc_out_channels, + *args, + **kwargs) + +@HEADS.register_module +class SharedFCBBoxHeadGrid(ConvFCBBoxHead): + + def __init__(self, num_fcs=2, fc_out_channels=1024, *args, **kwargs): + assert num_fcs >= 1 + super(SharedFCBBoxHeadGrid, self).__init__( + num_shared_convs=0, + num_shared_fcs=num_fcs, + num_cls_convs=0, + num_cls_fcs=0, + num_reg_convs=0, + num_reg_fcs=0, + fc_out_channels=fc_out_channels, + *args, + **kwargs) + + def loss(self, + cls_score, + bbox_pred, + labels, + label_weights, + bbox_targets, + bbox_weights, + reduce=True): + losses = dict() + if cls_score is not None: + losses['loss_cls'] = weighted_cross_entropy( + cls_score, labels, label_weights, reduce=reduce) + losses['acc'] = accuracy(cls_score, labels) + return losses + + def get_det_bboxes(self, + rois, + cls_score, + bbox_pred, + img_shape, + scale_factor, + rescale=False, + cfg=None): + if isinstance(cls_score, list): + cls_score = sum(cls_score) / float(len(cls_score)) + scores = F.softmax(cls_score, dim=1) if cls_score is not None else None + + bboxes = rois[:, 1:] + if img_shape[0] is not None: + bboxes[:,[0,2]].clamp_(min=0, max=img_shape[1]-1) + bboxes[:,[1,3]].clamp_(min=0, max=img_shape[0]-1) + + if cfg is None: + return bboxes, scores + else: + det_bboxes, det_labels = multiclass_nms( + bboxes, scores, cfg.score_thr, cfg.nms, cfg.max_per_img) + + return det_bboxes, det_labels \ No newline at end of file diff --git a/mmdet/models/builder.py b/mmdet/models/builder.py new file mode 100644 index 0000000..3de186c --- /dev/null +++ b/mmdet/models/builder.py @@ -0,0 +1,56 @@ +import mmcv +from torch import nn + +from .registry import (BACKBONES, NECKS, ROI_EXTRACTORS, SHARED_HEADS, HEADS, + DETECTORS) + + +def _build_module(cfg, registry, default_args): + assert isinstance(cfg, dict) and 'type' in cfg + assert isinstance(default_args, dict) or default_args is None + args = cfg.copy() + obj_type = args.pop('type') + if mmcv.is_str(obj_type): + if obj_type not in registry.module_dict: + raise KeyError('{} is not in 
the {} registry'.format( + obj_type, registry.name)) + obj_type = registry.module_dict[obj_type] + elif not isinstance(obj_type, type): + raise TypeError('type must be a str or valid type, but got {}'.format( + type(obj_type))) + if default_args is not None: + for name, value in default_args.items(): + args.setdefault(name, value) + return obj_type(**args) + + +def build(cfg, registry, default_args=None): + if isinstance(cfg, list): + modules = [_build_module(cfg_, registry, default_args) for cfg_ in cfg] + return nn.Sequential(*modules) + else: + return _build_module(cfg, registry, default_args) + + +def build_backbone(cfg): + return build(cfg, BACKBONES) + + +def build_neck(cfg): + return build(cfg, NECKS) + + +def build_roi_extractor(cfg): + return build(cfg, ROI_EXTRACTORS) + + +def build_shared_head(cfg): + return build(cfg, SHARED_HEADS) + + +def build_head(cfg): + return build(cfg, HEADS) + + +def build_detector(cfg, train_cfg=None, test_cfg=None): + return build(cfg, DETECTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg)) diff --git a/mmdet/models/detectors/__init__.py b/mmdet/models/detectors/__init__.py new file mode 100644 index 0000000..4abd939 --- /dev/null +++ b/mmdet/models/detectors/__init__.py @@ -0,0 +1,18 @@ +from .base import BaseDetector +from .single_stage import SingleStageDetector +from .two_stage import TwoStageDetector +from .rpn import RPN +from .fast_rcnn import FastRCNN +from .faster_rcnn import FasterRCNN +from .mask_rcnn import MaskRCNN +from .cascade_rcnn import CascadeRCNN +from .htc import HybridTaskCascade +from .retinanet import RetinaNet +from .fcos import FCOS +from .grid_rcnn import GridRCNN + +__all__ = [ + 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN', + 'FastRCNN', 'FasterRCNN', 'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', + 'RetinaNet', 'FCOS', 'GridRCNN' +] diff --git a/mmdet/models/detectors/base.py b/mmdet/models/detectors/base.py new file mode 100644 index 0000000..311ca90 --- /dev/null +++ b/mmdet/models/detectors/base.py @@ -0,0 +1,140 @@ +import logging +from abc import ABCMeta, abstractmethod + +import mmcv +import numpy as np +import torch.nn as nn +import pycocotools.mask as maskUtils + +from mmdet.core import tensor2imgs, get_classes + + +class BaseDetector(nn.Module): + """Base class for detectors""" + + __metaclass__ = ABCMeta + + def __init__(self): + super(BaseDetector, self).__init__() + + @property + def with_neck(self): + return hasattr(self, 'neck') and self.neck is not None + + @property + def with_shared_head(self): + return hasattr(self, 'shared_head') and self.shared_head is not None + + @property + def with_bbox(self): + return hasattr(self, 'bbox_head') and self.bbox_head is not None + + @property + def with_mask(self): + return hasattr(self, 'mask_head') and self.mask_head is not None + + @abstractmethod + def extract_feat(self, imgs): + pass + + def extract_feats(self, imgs): + assert isinstance(imgs, list) + for img in imgs: + yield self.extract_feat(img) + + @abstractmethod + def forward_train(self, imgs, img_metas, **kwargs): + pass + + @abstractmethod + def simple_test(self, img, img_meta, **kwargs): + pass + + @abstractmethod + def aug_test(self, imgs, img_metas, **kwargs): + pass + + def init_weights(self, pretrained=None): + if pretrained is not None: + logger = logging.getLogger() + logger.info('load model from: {}'.format(pretrained)) + + def forward_test(self, imgs, img_metas, **kwargs): + for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]: + if not isinstance(var, list): + raise 
TypeError('{} must be a list, but got {}'.format( + name, type(var))) + + num_augs = len(imgs) + if num_augs != len(img_metas): + raise ValueError( + 'num of augmentations ({}) != num of image meta ({})'.format( + len(imgs), len(img_metas))) + # TODO: remove the restriction of imgs_per_gpu == 1 when prepared + imgs_per_gpu = imgs[0].size(0) + assert imgs_per_gpu == 1 + + if num_augs == 1: + return self.simple_test(imgs[0], img_metas[0], **kwargs) + else: + return self.aug_test(imgs, img_metas, **kwargs) + + def forward(self, img, img_meta, return_loss=True, **kwargs): + if return_loss: + return self.forward_train(img, img_meta, **kwargs) + else: + return self.forward_test(img, img_meta, **kwargs) + + def show_result(self, + data, + result, + img_norm_cfg, + dataset=None, + score_thr=0.3): + if isinstance(result, tuple): + bbox_result, segm_result = result + else: + bbox_result, segm_result = result, None + + img_tensor = data['img'][0] + img_metas = data['img_meta'][0].data[0] + imgs = tensor2imgs(img_tensor, **img_norm_cfg) + assert len(imgs) == len(img_metas) + + if dataset is None: + class_names = self.CLASSES + elif isinstance(dataset, str): + class_names = get_classes(dataset) + elif isinstance(dataset, (list, tuple)): + class_names = dataset + else: + raise TypeError( + 'dataset must be a valid dataset name or a sequence' + ' of class names, not {}'.format(type(dataset))) + + for img, img_meta in zip(imgs, img_metas): + h, w, _ = img_meta['img_shape'] + img_show = img[:h, :w, :] + + bboxes = np.vstack(bbox_result) + # draw segmentation masks + if segm_result is not None: + segms = mmcv.concat_list(segm_result) + inds = np.where(bboxes[:, -1] > score_thr)[0] + for i in inds: + color_mask = np.random.randint( + 0, 256, (1, 3), dtype=np.uint8) + mask = maskUtils.decode(segms[i]).astype(np.bool) + img_show[mask] = img_show[mask] * 0.5 + color_mask * 0.5 + # draw bounding boxes + labels = [ + np.full(bbox.shape[0], i, dtype=np.int32) + for i, bbox in enumerate(bbox_result) + ] + labels = np.concatenate(labels) + mmcv.imshow_det_bboxes( + img_show, + bboxes, + labels, + class_names=class_names, + score_thr=score_thr) diff --git a/mmdet/models/detectors/cascade_rcnn.py b/mmdet/models/detectors/cascade_rcnn.py new file mode 100644 index 0000000..f0564c9 --- /dev/null +++ b/mmdet/models/detectors/cascade_rcnn.py @@ -0,0 +1,379 @@ +from __future__ import division + +import torch +import torch.nn as nn + +from .base import BaseDetector +from .test_mixins import RPNTestMixin +from .. 
import builder +from ..registry import DETECTORS +from mmdet.core import (build_assigner, bbox2roi, bbox2result, build_sampler, + merge_aug_masks) + + +@DETECTORS.register_module +class CascadeRCNN(BaseDetector, RPNTestMixin): + + def __init__(self, + num_stages, + backbone, + neck=None, + shared_head=None, + rpn_head=None, + bbox_roi_extractor=None, + bbox_head=None, + mask_roi_extractor=None, + mask_head=None, + train_cfg=None, + test_cfg=None, + pretrained=None): + assert bbox_roi_extractor is not None + assert bbox_head is not None + super(CascadeRCNN, self).__init__() + + self.num_stages = num_stages + self.backbone = builder.build_backbone(backbone) + + if neck is not None: + self.neck = builder.build_neck(neck) + + if rpn_head is not None: + self.rpn_head = builder.build_head(rpn_head) + + if shared_head is not None: + self.shared_head = builder.build_shared_head(shared_head) + + if bbox_head is not None: + self.bbox_roi_extractor = nn.ModuleList() + self.bbox_head = nn.ModuleList() + if not isinstance(bbox_roi_extractor, list): + bbox_roi_extractor = [ + bbox_roi_extractor for _ in range(num_stages) + ] + if not isinstance(bbox_head, list): + bbox_head = [bbox_head for _ in range(num_stages)] + assert len(bbox_roi_extractor) == len(bbox_head) == self.num_stages + for roi_extractor, head in zip(bbox_roi_extractor, bbox_head): + self.bbox_roi_extractor.append( + builder.build_roi_extractor(roi_extractor)) + self.bbox_head.append(builder.build_head(head)) + + if mask_head is not None: + self.mask_head = nn.ModuleList() + if not isinstance(mask_head, list): + mask_head = [mask_head for _ in range(num_stages)] + assert len(mask_head) == self.num_stages + for head in mask_head: + self.mask_head.append(builder.build_head(head)) + if mask_roi_extractor is not None: + self.share_roi_extractor = False + self.mask_roi_extractor = nn.ModuleList() + if not isinstance(mask_roi_extractor, list): + mask_roi_extractor = [ + mask_roi_extractor for _ in range(num_stages) + ] + assert len(mask_roi_extractor) == self.num_stages + for roi_extractor in mask_roi_extractor: + self.mask_roi_extractor.append( + builder.build_roi_extractor(roi_extractor)) + else: + self.share_roi_extractor = True + self.mask_roi_extractor = self.bbox_roi_extractor + + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + self.init_weights(pretrained=pretrained) + + @property + def with_rpn(self): + return hasattr(self, 'rpn_head') and self.rpn_head is not None + + def init_weights(self, pretrained=None): + super(CascadeRCNN, self).init_weights(pretrained) + self.backbone.init_weights(pretrained=pretrained) + if self.with_neck: + if isinstance(self.neck, nn.Sequential): + for m in self.neck: + m.init_weights() + else: + self.neck.init_weights() + if self.with_rpn: + self.rpn_head.init_weights() + if self.with_shared_head: + self.shared_head.init_weights(pretrained=pretrained) + for i in range(self.num_stages): + if self.with_bbox: + self.bbox_roi_extractor[i].init_weights() + self.bbox_head[i].init_weights() + if self.with_mask: + if not self.share_roi_extractor: + self.mask_roi_extractor[i].init_weights() + self.mask_head[i].init_weights() + + def extract_feat(self, img): + x = self.backbone(img) + if self.with_neck: + x = self.neck(x) + return x + + def forward_train(self, + img, + img_meta, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None, + gt_masks=None, + proposals=None): + x = self.extract_feat(img) + + losses = dict() + + if self.with_rpn: + rpn_outs = self.rpn_head(x) + rpn_loss_inputs = rpn_outs + (gt_bboxes, 
img_meta, + self.train_cfg.rpn) + rpn_losses = self.rpn_head.loss( + *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) + losses.update(rpn_losses) + + proposal_cfg = self.train_cfg.get('rpn_proposal', + self.test_cfg.rpn) + proposal_inputs = rpn_outs + (img_meta, proposal_cfg) + proposal_list = self.rpn_head.get_bboxes(*proposal_inputs) + else: + proposal_list = proposals + + for i in range(self.num_stages): + self.current_stage = i + rcnn_train_cfg = self.train_cfg.rcnn[i] + lw = self.train_cfg.stage_loss_weights[i] + + # assign gts and sample proposals + sampling_results = [] + if self.with_bbox or self.with_mask: + bbox_assigner = build_assigner(rcnn_train_cfg.assigner) + bbox_sampler = build_sampler( + rcnn_train_cfg.sampler, context=self) + num_imgs = img.size(0) + if gt_bboxes_ignore is None: + gt_bboxes_ignore = [None for _ in range(num_imgs)] + + for j in range(num_imgs): + assign_result = bbox_assigner.assign( + proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j], + gt_labels[j]) + sampling_result = bbox_sampler.sample( + assign_result, + proposal_list[j], + gt_bboxes[j], + gt_labels[j], + feats=[lvl_feat[j][None] for lvl_feat in x]) + sampling_results.append(sampling_result) + + # bbox head forward and loss + bbox_roi_extractor = self.bbox_roi_extractor[i] + bbox_head = self.bbox_head[i] + + rois = bbox2roi([res.bboxes for res in sampling_results]) + bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs], + rois) + if self.with_shared_head: + bbox_feats = self.shared_head(bbox_feats) + cls_score, bbox_pred = bbox_head(bbox_feats) + + bbox_targets = bbox_head.get_target(sampling_results, gt_bboxes, + gt_labels, rcnn_train_cfg) + loss_bbox = bbox_head.loss(cls_score, bbox_pred, *bbox_targets) + for name, value in loss_bbox.items(): + losses['s{}.{}'.format(i, name)] = ( + value * lw if 'loss' in name else value) + + # mask head forward and loss + if self.with_mask: + if not self.share_roi_extractor: + mask_roi_extractor = self.mask_roi_extractor[i] + pos_rois = bbox2roi( + [res.pos_bboxes for res in sampling_results]) + mask_feats = mask_roi_extractor( + x[:mask_roi_extractor.num_inputs], pos_rois) + if self.with_shared_head: + mask_feats = self.shared_head(mask_feats) + else: + # reuse positive bbox feats + pos_inds = [] + device = bbox_feats.device + for res in sampling_results: + pos_inds.append( + torch.ones( + res.pos_bboxes.shape[0], + device=device, + dtype=torch.uint8)) + pos_inds.append( + torch.zeros( + res.neg_bboxes.shape[0], + device=device, + dtype=torch.uint8)) + pos_inds = torch.cat(pos_inds) + mask_feats = bbox_feats[pos_inds] + mask_head = self.mask_head[i] + mask_pred = mask_head(mask_feats) + mask_targets = mask_head.get_target(sampling_results, gt_masks, + rcnn_train_cfg) + pos_labels = torch.cat( + [res.pos_gt_labels for res in sampling_results]) + loss_mask = mask_head.loss(mask_pred, mask_targets, pos_labels) + for name, value in loss_mask.items(): + losses['s{}.{}'.format(i, name)] = ( + value * lw if 'loss' in name else value) + + # refine bboxes + if i < self.num_stages - 1: + pos_is_gts = [res.pos_is_gt for res in sampling_results] + roi_labels = bbox_targets[0] # bbox_targets is a tuple + with torch.no_grad(): + proposal_list = bbox_head.refine_bboxes( + rois, roi_labels, bbox_pred, pos_is_gts, img_meta) + + return losses + + def simple_test(self, img, img_meta, proposals=None, rescale=False): + x = self.extract_feat(img) + proposal_list = self.simple_test_rpn( + x, img_meta, self.test_cfg.rpn) if proposals is None else proposals + + 
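+        # The stage loop below runs each cascade head on the shared features;
+        # rois are refined between stages with regress_by_class and per-stage
+        # cls_scores are accumulated in ms_scores for the final ensemble.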
img_shape = img_meta[0]['img_shape'] + ori_shape = img_meta[0]['ori_shape'] + scale_factor = img_meta[0]['scale_factor'] + + # "ms" in variable names means multi-stage + ms_bbox_result = {} + ms_segm_result = {} + ms_scores = [] + rcnn_test_cfg = self.test_cfg.rcnn + + rois = bbox2roi(proposal_list) + for i in range(self.num_stages): + bbox_roi_extractor = self.bbox_roi_extractor[i] + bbox_head = self.bbox_head[i] + + bbox_feats = bbox_roi_extractor( + x[:len(bbox_roi_extractor.featmap_strides)], rois) + if self.with_shared_head: + bbox_feats = self.shared_head(bbox_feats) + + cls_score, bbox_pred = bbox_head(bbox_feats) + ms_scores.append(cls_score) + + if self.test_cfg.keep_all_stages: + det_bboxes, det_labels = bbox_head.get_det_bboxes( + rois, + cls_score, + bbox_pred, + img_shape, + scale_factor, + rescale=rescale, + cfg=rcnn_test_cfg) + bbox_result = bbox2result(det_bboxes, det_labels, + bbox_head.num_classes) + ms_bbox_result['stage{}'.format(i)] = bbox_result + + if self.with_mask: + mask_roi_extractor = self.mask_roi_extractor[i] + mask_head = self.mask_head[i] + if det_bboxes.shape[0] == 0: + segm_result = [ + [] for _ in range(mask_head.num_classes - 1) + ] + else: + _bboxes = ( + det_bboxes[:, :4] * scale_factor + if rescale else det_bboxes) + mask_rois = bbox2roi([_bboxes]) + mask_feats = mask_roi_extractor( + x[:len(mask_roi_extractor.featmap_strides)], + mask_rois) + if self.with_shared_head: + mask_feats = self.shared_head(mask_feats, i) + mask_pred = mask_head(mask_feats) + segm_result = mask_head.get_seg_masks( + mask_pred, _bboxes, det_labels, rcnn_test_cfg, + ori_shape, scale_factor, rescale) + ms_segm_result['stage{}'.format(i)] = segm_result + + if i < self.num_stages - 1: + bbox_label = cls_score.argmax(dim=1) + rois = bbox_head.regress_by_class(rois, bbox_label, bbox_pred, + img_meta[0]) + + cls_score = sum(ms_scores) / self.num_stages + det_bboxes, det_labels = self.bbox_head[-1].get_det_bboxes( + rois, + cls_score, + bbox_pred, + img_shape, + scale_factor, + rescale=rescale, + cfg=rcnn_test_cfg) + bbox_result = bbox2result(det_bboxes, det_labels, + self.bbox_head[-1].num_classes) + ms_bbox_result['ensemble'] = bbox_result + + if self.with_mask: + if det_bboxes.shape[0] == 0: + segm_result = [ + [] for _ in range(self.mask_head[-1].num_classes - 1) + ] + else: + _bboxes = ( + det_bboxes[:, :4] * scale_factor + if rescale else det_bboxes) + mask_rois = bbox2roi([_bboxes]) + aug_masks = [] + for i in range(self.num_stages): + mask_roi_extractor = self.mask_roi_extractor[i] + mask_feats = mask_roi_extractor( + x[:len(mask_roi_extractor.featmap_strides)], mask_rois) + if self.with_shared_head: + mask_feats = self.shared_head(mask_feats) + mask_pred = self.mask_head[i](mask_feats) + aug_masks.append(mask_pred.sigmoid().cpu().numpy()) + merged_masks = merge_aug_masks(aug_masks, + [img_meta] * self.num_stages, + self.test_cfg.rcnn) + segm_result = self.mask_head[-1].get_seg_masks( + merged_masks, _bboxes, det_labels, rcnn_test_cfg, + ori_shape, scale_factor, rescale) + ms_segm_result['ensemble'] = segm_result + + if not self.test_cfg.keep_all_stages: + if self.with_mask: + results = (ms_bbox_result['ensemble'], + ms_segm_result['ensemble']) + else: + results = ms_bbox_result['ensemble'] + else: + if self.with_mask: + results = { + stage: (ms_bbox_result[stage], ms_segm_result[stage]) + for stage in ms_bbox_result + } + else: + results = ms_bbox_result + + return results + + def aug_test(self, img, img_meta, proposals=None, rescale=False): + raise NotImplementedError + + 
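+    # simple_test() above returns the 'ensemble' results (a bbox result, plus
+    # a segm result when a mask head is present), or a dict keyed by
+    # 'stage0', ..., 'ensemble' when test_cfg.keep_all_stages is True;
+    # show_result() therefore unwraps the 'ensemble' entry before delegating
+    # to BaseDetector.show_result.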
def show_result(self, data, result, img_norm_cfg, **kwargs): + if self.with_mask: + ms_bbox_result, ms_segm_result = result + if isinstance(ms_bbox_result, dict): + result = (ms_bbox_result['ensemble'], + ms_segm_result['ensemble']) + else: + if isinstance(result, dict): + result = result['ensemble'] + super(CascadeRCNN, self).show_result(data, result, img_norm_cfg, + **kwargs) diff --git a/mmdet/models/detectors/fast_rcnn.py b/mmdet/models/detectors/fast_rcnn.py new file mode 100644 index 0000000..64c0391 --- /dev/null +++ b/mmdet/models/detectors/fast_rcnn.py @@ -0,0 +1,50 @@ +from .two_stage import TwoStageDetector +from ..registry import DETECTORS + + +@DETECTORS.register_module +class FastRCNN(TwoStageDetector): + + def __init__(self, + backbone, + bbox_roi_extractor, + bbox_head, + train_cfg, + test_cfg, + neck=None, + shared_head=None, + mask_roi_extractor=None, + mask_head=None, + pretrained=None): + super(FastRCNN, self).__init__( + backbone=backbone, + neck=neck, + shared_head=shared_head, + bbox_roi_extractor=bbox_roi_extractor, + bbox_head=bbox_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + mask_roi_extractor=mask_roi_extractor, + mask_head=mask_head, + pretrained=pretrained) + + def forward_test(self, imgs, img_metas, proposals, **kwargs): + for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]: + if not isinstance(var, list): + raise TypeError('{} must be a list, but got {}'.format( + name, type(var))) + + num_augs = len(imgs) + if num_augs != len(img_metas): + raise ValueError( + 'num of augmentations ({}) != num of image meta ({})'.format( + len(imgs), len(img_metas))) + # TODO: remove the restriction of imgs_per_gpu == 1 when prepared + imgs_per_gpu = imgs[0].size(0) + assert imgs_per_gpu == 1 + + if num_augs == 1: + return self.simple_test(imgs[0], img_metas[0], proposals[0], + **kwargs) + else: + return self.aug_test(imgs, img_metas, proposals, **kwargs) diff --git a/mmdet/models/detectors/faster_rcnn.py b/mmdet/models/detectors/faster_rcnn.py new file mode 100644 index 0000000..b4e961b --- /dev/null +++ b/mmdet/models/detectors/faster_rcnn.py @@ -0,0 +1,27 @@ +from .two_stage import TwoStageDetector +from ..registry import DETECTORS + + +@DETECTORS.register_module +class FasterRCNN(TwoStageDetector): + + def __init__(self, + backbone, + rpn_head, + bbox_roi_extractor, + bbox_head, + train_cfg, + test_cfg, + neck=None, + shared_head=None, + pretrained=None): + super(FasterRCNN, self).__init__( + backbone=backbone, + neck=neck, + shared_head=shared_head, + rpn_head=rpn_head, + bbox_roi_extractor=bbox_roi_extractor, + bbox_head=bbox_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + pretrained=pretrained) diff --git a/mmdet/models/detectors/fcos.py b/mmdet/models/detectors/fcos.py new file mode 100644 index 0000000..4c3dce1 --- /dev/null +++ b/mmdet/models/detectors/fcos.py @@ -0,0 +1,16 @@ +from .single_stage import SingleStageDetector +from ..registry import DETECTORS + + +@DETECTORS.register_module +class FCOS(SingleStageDetector): + + def __init__(self, + backbone, + neck, + bbox_head, + train_cfg=None, + test_cfg=None, + pretrained=None): + super(FCOS, self).__init__(backbone, neck, bbox_head, train_cfg, + test_cfg, pretrained) diff --git a/mmdet/models/detectors/grid_rcnn.py b/mmdet/models/detectors/grid_rcnn.py new file mode 100644 index 0000000..2e6661a --- /dev/null +++ b/mmdet/models/detectors/grid_rcnn.py @@ -0,0 +1,179 @@ +from .two_stage import TwoStageDetector +from ..registry import DETECTORS + +import torch +import torch.nn as nn + +from 
.test_mixins import RPNTestMixin, BBoxTestMixin, MaskTestMixin +from .. import builder +from mmdet.core import bbox2roi, bbox2result, build_assigner, build_sampler, random_jitter + +@DETECTORS.register_module +class GridRCNN(TwoStageDetector): + + def __init__(self, + backbone, + rpn_head, + bbox_roi_extractor, + bbox_head, + grid_roi_extractor, + grid_head, + train_cfg, + test_cfg, + neck=None, + shared_head=None, + pretrained=None): + super(GridRCNN, self).__init__( + backbone=backbone, + neck=neck, + shared_head=shared_head, + rpn_head=rpn_head, + bbox_roi_extractor=bbox_roi_extractor, + bbox_head=bbox_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + pretrained=pretrained) + + if grid_head is not None: + if grid_roi_extractor is not None: + self.grid_roi_extractor = builder.build_roi_extractor( + grid_roi_extractor) + self.share_roi_extractor = False + else: + self.share_roi_extractor = True + self.grid_roi_extractor = self.bbox_roi_extractor + self.grid_head = builder.build_head(grid_head) + + self.init_weights(pretrained=pretrained) + + @property + def with_grid(self): + return hasattr(self, 'grid_head') and self.grid_head is not None + + def init_weights(self, pretrained=None): + super(GridRCNN, self).init_weights(pretrained) + + if self.with_grid: + self.grid_head.init_weights() + if not self.share_roi_extractor: + self.grid_roi_extractor.init_weights() + + def forward_train(self, + img, + img_meta, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None, + gt_masks=None, + proposals=None): + x = self.extract_feat(img) + + losses = dict() + + # RPN forward and loss + if self.with_rpn: + rpn_outs = self.rpn_head(x) + rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta, + self.train_cfg.rpn) + rpn_losses = self.rpn_head.loss( + *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) + losses.update(rpn_losses) + + proposal_cfg = self.train_cfg.get('rpn_proposal', + self.test_cfg.rpn) + proposal_inputs = rpn_outs + (img_meta, proposal_cfg) + proposal_list = self.rpn_head.get_bboxes(*proposal_inputs) + else: + proposal_list = proposals + + # assign gts and sample proposals + if self.with_bbox or self.with_grid: + bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner) + bbox_sampler = build_sampler( + self.train_cfg.rcnn.sampler, context=self) + num_imgs = img.size(0) + if gt_bboxes_ignore is None: + gt_bboxes_ignore = [None for _ in range(num_imgs)] + sampling_results = [] + for i in range(num_imgs): + assign_result = bbox_assigner.assign( + proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], + gt_labels[i]) + sampling_result = bbox_sampler.sample( + assign_result, + proposal_list[i], + gt_bboxes[i], + gt_labels[i], + feats=[lvl_feat[i][None] for lvl_feat in x]) + sampling_results.append(sampling_result) + + # bbox head forward and loss + if self.with_bbox: + rois = bbox2roi([res.bboxes for res in sampling_results]) + # TODO: a more flexible way to decide which feature maps to use + bbox_feats = self.bbox_roi_extractor( + x[:self.bbox_roi_extractor.num_inputs], rois) + if self.with_shared_head: + bbox_feats = self.shared_head(bbox_feats) + cls_score, bbox_pred = self.bbox_head(bbox_feats) + + bbox_targets = self.bbox_head.get_target( + sampling_results, gt_bboxes, gt_labels, self.train_cfg.rcnn) + loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, + *bbox_targets) + losses.update(loss_bbox) + + # Grid head forward and loss + if self.with_grid: + sampling_results = random_jitter(sampling_results,img_meta) + pos_rois = bbox2roi( + [res.pos_bboxes for res in sampling_results]) + 
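+            # the grid branch is trained on positive proposals only; the
+            # random_jitter call above perturbs the sampled boxes before the
+            # grid features and targets are computed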
grid_feats = self.grid_roi_extractor( + x[:self.grid_roi_extractor.num_inputs], pos_rois) + if self.with_shared_head: + grid_feats = self.shared_head(grid_feats) + # Accelerate training + max_sample_num_grid = self.train_cfg.rcnn.get('max_num_grid',192) + sample_idx = torch.randperm(grid_feats.shape[0])[:min(grid_feats.shape[0],max_sample_num_grid)] + grid_feats = grid_feats[sample_idx] + + grid_pred1,grid_pred2 = self.grid_head(grid_feats) + + grid_targets = self.grid_head.get_target( + sampling_results, self.train_cfg.rcnn) + grid_targets = grid_targets[sample_idx] + + loss_grid = self.grid_head.loss(grid_pred1,grid_pred2,grid_targets) + losses.update(loss_grid) + + return losses + + def simple_test(self, img, img_meta, proposals=None, rescale=False): + """Test without augmentation.""" + assert self.with_bbox, "Bbox head must be implemented." + + x = self.extract_feat(img) + + proposal_list = self.simple_test_rpn( + x, img_meta, self.test_cfg.rpn) if proposals is None else proposals + + det_bboxes, det_labels = self.simple_test_bboxes( + x, img_meta, proposal_list, self.test_cfg.rcnn, rescale=rescale) + + # pack rois into bboxes + gridrois = bbox2roi([det_bboxes[:,:4]]) + grid_feats = self.grid_roi_extractor( + x[:len(self.grid_roi_extractor.featmap_strides)], gridrois) + if gridrois.shape[0] != 0: + self.grid_head.test_mode = True + _,grid_pred = self.grid_head(grid_feats) + det_bboxes = self.grid_head.get_bboxes(det_bboxes, grid_pred, img_meta) + if rescale: + det_bboxes[:,:4] /= img_meta[0]['scale_factor'] + else: + det_bboxes = torch.Tensor([]) + + bbox_results = bbox2result(det_bboxes, det_labels, + self.bbox_head.num_classes) + + return bbox_results diff --git a/mmdet/models/detectors/htc.py b/mmdet/models/detectors/htc.py new file mode 100644 index 0000000..0384aa9 --- /dev/null +++ b/mmdet/models/detectors/htc.py @@ -0,0 +1,396 @@ +import torch +import torch.nn.functional as F + +from .cascade_rcnn import CascadeRCNN +from .. 
import builder +from ..registry import DETECTORS +from mmdet.core import (bbox2roi, bbox2result, build_assigner, build_sampler, + merge_aug_masks) + + +@DETECTORS.register_module +class HybridTaskCascade(CascadeRCNN): + + def __init__(self, + num_stages, + backbone, + semantic_roi_extractor=None, + semantic_head=None, + semantic_fusion=('bbox', 'mask'), + interleaved=True, + mask_info_flow=True, + **kwargs): + super(HybridTaskCascade, self).__init__(num_stages, backbone, **kwargs) + assert self.with_bbox and self.with_mask + assert not self.with_shared_head # shared head not supported + if semantic_head is not None: + self.semantic_roi_extractor = builder.build_roi_extractor( + semantic_roi_extractor) + self.semantic_head = builder.build_head(semantic_head) + + self.semantic_fusion = semantic_fusion + self.interleaved = interleaved + self.mask_info_flow = mask_info_flow + + @property + def with_semantic(self): + if hasattr(self, 'semantic_head') and self.semantic_head is not None: + return True + else: + return False + + def _bbox_forward_train(self, + stage, + x, + sampling_results, + gt_bboxes, + gt_labels, + rcnn_train_cfg, + semantic_feat=None): + rois = bbox2roi([res.bboxes for res in sampling_results]) + bbox_roi_extractor = self.bbox_roi_extractor[stage] + bbox_head = self.bbox_head[stage] + bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs], + rois) + # semantic feature fusion + # element-wise sum for original features and pooled semantic features + if self.with_semantic and 'bbox' in self.semantic_fusion: + bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat], + rois) + if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]: + bbox_semantic_feat = F.adaptive_avg_pool2d( + bbox_semantic_feat, bbox_feats.shape[-2:]) + bbox_feats += bbox_semantic_feat + + cls_score, bbox_pred = bbox_head(bbox_feats) + + bbox_targets = bbox_head.get_target(sampling_results, gt_bboxes, + gt_labels, rcnn_train_cfg) + loss_bbox = bbox_head.loss(cls_score, bbox_pred, *bbox_targets) + return loss_bbox, rois, bbox_targets, bbox_pred + + def _mask_forward_train(self, + stage, + x, + sampling_results, + gt_masks, + rcnn_train_cfg, + semantic_feat=None): + mask_roi_extractor = self.mask_roi_extractor[stage] + mask_head = self.mask_head[stage] + pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) + mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs], + pos_rois) + + # semantic feature fusion + # element-wise sum for original features and pooled semantic features + if self.with_semantic and 'mask' in self.semantic_fusion: + mask_semantic_feat = self.semantic_roi_extractor([semantic_feat], + pos_rois) + if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]: + mask_semantic_feat = F.adaptive_avg_pool2d( + mask_semantic_feat, mask_feats.shape[-2:]) + mask_feats += mask_semantic_feat + + # mask information flow + # forward all previous mask heads to obtain last_feat, and fuse it + # with the normal mask feature + if self.mask_info_flow: + last_feat = None + for i in range(stage): + last_feat = self.mask_head[i]( + mask_feats, last_feat, return_logits=False) + mask_pred = mask_head(mask_feats, last_feat, return_feat=False) + else: + mask_pred = mask_head(mask_feats) + + mask_targets = mask_head.get_target(sampling_results, gt_masks, + rcnn_train_cfg) + pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) + loss_mask = mask_head.loss(mask_pred, mask_targets, pos_labels) + return loss_mask + + def _bbox_forward_test(self, stage, x, rois, 
semantic_feat=None): + bbox_roi_extractor = self.bbox_roi_extractor[stage] + bbox_head = self.bbox_head[stage] + bbox_feats = bbox_roi_extractor( + x[:len(bbox_roi_extractor.featmap_strides)], rois) + if self.with_semantic and 'bbox' in self.semantic_fusion: + bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat], + rois) + if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]: + bbox_semantic_feat = F.adaptive_avg_pool2d( + bbox_semantic_feat, bbox_feats.shape[-2:]) + bbox_feats += bbox_semantic_feat + cls_score, bbox_pred = bbox_head(bbox_feats) + return cls_score, bbox_pred + + def _mask_forward_test(self, stage, x, bboxes, semantic_feat=None): + mask_roi_extractor = self.mask_roi_extractor[stage] + mask_head = self.mask_head[stage] + mask_rois = bbox2roi([bboxes]) + mask_feats = mask_roi_extractor( + x[:len(mask_roi_extractor.featmap_strides)], mask_rois) + if self.with_semantic and 'mask' in self.semantic_fusion: + mask_semantic_feat = self.semantic_roi_extractor([semantic_feat], + mask_rois) + if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]: + mask_semantic_feat = F.adaptive_avg_pool2d( + mask_semantic_feat, mask_feats.shape[-2:]) + mask_feats += mask_semantic_feat + if self.mask_info_flow: + last_feat = None + last_pred = None + for i in range(stage): + mask_pred, last_feat = self.mask_head[i](mask_feats, last_feat) + if last_pred is not None: + mask_pred = mask_pred + last_pred + last_pred = mask_pred + mask_pred = mask_head(mask_feats, last_feat, return_feat=False) + if last_pred is not None: + mask_pred = mask_pred + last_pred + else: + mask_pred = mask_head(mask_feats) + return mask_pred + + def forward_train(self, + img, + img_meta, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None, + gt_masks=None, + gt_semantic_seg=None, + proposals=None): + x = self.extract_feat(img) + + losses = dict() + + # RPN part, the same as normal two-stage detectors + if self.with_rpn: + rpn_outs = self.rpn_head(x) + rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta, + self.train_cfg.rpn) + rpn_losses = self.rpn_head.loss( + *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) + losses.update(rpn_losses) + + proposal_cfg = self.train_cfg.get('rpn_proposal', + self.test_cfg.rpn) + proposal_inputs = rpn_outs + (img_meta, proposal_cfg) + proposal_list = self.rpn_head.get_bboxes(*proposal_inputs) + else: + proposal_list = proposals + + # semantic segmentation part + # 2 outputs: segmentation prediction and embedded features + if self.with_semantic: + semantic_pred, semantic_feat = self.semantic_head(x) + loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_seg) + losses['loss_semantic_seg'] = loss_seg + else: + semantic_feat = None + + for i in range(self.num_stages): + self.current_stage = i + rcnn_train_cfg = self.train_cfg.rcnn[i] + lw = self.train_cfg.stage_loss_weights[i] + + # assign gts and sample proposals + sampling_results = [] + bbox_assigner = build_assigner(rcnn_train_cfg.assigner) + bbox_sampler = build_sampler(rcnn_train_cfg.sampler, context=self) + num_imgs = img.size(0) + if gt_bboxes_ignore is None: + gt_bboxes_ignore = [None for _ in range(num_imgs)] + + for j in range(num_imgs): + assign_result = bbox_assigner.assign( + proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j], + gt_labels[j]) + sampling_result = bbox_sampler.sample( + assign_result, + proposal_list[j], + gt_bboxes[j], + gt_labels[j], + feats=[lvl_feat[j][None] for lvl_feat in x]) + sampling_results.append(sampling_result) + + # bbox head forward and loss + loss_bbox, rois, bbox_targets, 
bbox_pred = \ + self._bbox_forward_train( + i, x, sampling_results, gt_bboxes, gt_labels, + rcnn_train_cfg, semantic_feat) + roi_labels = bbox_targets[0] + + for name, value in loss_bbox.items(): + losses['s{}.{}'.format(i, name)] = ( + value * lw if 'loss' in name else value) + + # mask head forward and loss + if self.with_mask: + # interleaved execution: use regressed bboxes by the box branch + # to train the mask branch + if self.interleaved: + pos_is_gts = [res.pos_is_gt for res in sampling_results] + with torch.no_grad(): + proposal_list = self.bbox_head[i].refine_bboxes( + rois, roi_labels, bbox_pred, pos_is_gts, img_meta) + # re-assign and sample 512 RoIs from 512 RoIs + sampling_results = [] + for j in range(num_imgs): + assign_result = bbox_assigner.assign( + proposal_list[j], gt_bboxes[j], + gt_bboxes_ignore[j], gt_labels[j]) + sampling_result = bbox_sampler.sample( + assign_result, + proposal_list[j], + gt_bboxes[j], + gt_labels[j], + feats=[lvl_feat[j][None] for lvl_feat in x]) + sampling_results.append(sampling_result) + loss_mask = self._mask_forward_train(i, x, sampling_results, + gt_masks, rcnn_train_cfg, + semantic_feat) + for name, value in loss_mask.items(): + losses['s{}.{}'.format(i, name)] = ( + value * lw if 'loss' in name else value) + + # refine bboxes (same as Cascade R-CNN) + if i < self.num_stages - 1 and not self.interleaved: + pos_is_gts = [res.pos_is_gt for res in sampling_results] + with torch.no_grad(): + proposal_list = self.bbox_head[i].refine_bboxes( + rois, roi_labels, bbox_pred, pos_is_gts, img_meta) + + return losses + + def simple_test(self, img, img_meta, proposals=None, rescale=False): + x = self.extract_feat(img) + proposal_list = self.simple_test_rpn( + x, img_meta, self.test_cfg.rpn) if proposals is None else proposals + + if self.with_semantic: + _, semantic_feat = self.semantic_head(x) + else: + semantic_feat = None + + img_shape = img_meta[0]['img_shape'] + ori_shape = img_meta[0]['ori_shape'] + scale_factor = img_meta[0]['scale_factor'] + + # "ms" in variable names means multi-stage + ms_bbox_result = {} + ms_segm_result = {} + ms_scores = [] + rcnn_test_cfg = self.test_cfg.rcnn + + rois = bbox2roi(proposal_list) + for i in range(self.num_stages): + bbox_head = self.bbox_head[i] + cls_score, bbox_pred = self._bbox_forward_test( + i, x, rois, semantic_feat=semantic_feat) + ms_scores.append(cls_score) + + if self.test_cfg.keep_all_stages: + det_bboxes, det_labels = bbox_head.get_det_bboxes( + rois, + cls_score, + bbox_pred, + img_shape, + scale_factor, + rescale=rescale, + nms_cfg=rcnn_test_cfg) + bbox_result = bbox2result(det_bboxes, det_labels, + bbox_head.num_classes) + ms_bbox_result['stage{}'.format(i)] = bbox_result + + if self.with_mask: + mask_head = self.mask_head[i] + if det_bboxes.shape[0] == 0: + segm_result = [ + [] for _ in range(mask_head.num_classes - 1) + ] + else: + _bboxes = ( + det_bboxes[:, :4] * scale_factor + if rescale else det_bboxes) + mask_pred = self._mask_forward_test( + i, x, _bboxes, semantic_feat=semantic_feat) + segm_result = mask_head.get_seg_masks( + mask_pred, _bboxes, det_labels, rcnn_test_cfg, + ori_shape, scale_factor, rescale) + ms_segm_result['stage{}'.format(i)] = segm_result + + if i < self.num_stages - 1: + bbox_label = cls_score.argmax(dim=1) + rois = bbox_head.regress_by_class(rois, bbox_label, bbox_pred, + img_meta[0]) + + cls_score = sum(ms_scores) / float(len(ms_scores)) + det_bboxes, det_labels = self.bbox_head[-1].get_det_bboxes( + rois, + cls_score, + bbox_pred, + img_shape, + scale_factor, + 
rescale=rescale, + cfg=rcnn_test_cfg) + bbox_result = bbox2result(det_bboxes, det_labels, + self.bbox_head[-1].num_classes) + ms_bbox_result['ensemble'] = bbox_result + + if self.with_mask: + if det_bboxes.shape[0] == 0: + segm_result = [ + [] for _ in range(self.mask_head[-1].num_classes - 1) + ] + else: + _bboxes = ( + det_bboxes[:, :4] * scale_factor + if rescale else det_bboxes) + + mask_rois = bbox2roi([_bboxes]) + aug_masks = [] + mask_roi_extractor = self.mask_roi_extractor[-1] + mask_feats = mask_roi_extractor( + x[:len(mask_roi_extractor.featmap_strides)], mask_rois) + if self.with_semantic and 'mask' in self.semantic_fusion: + mask_semantic_feat = self.semantic_roi_extractor( + [semantic_feat], mask_rois) + mask_feats += mask_semantic_feat + last_feat = None + for i in range(self.num_stages): + mask_head = self.mask_head[i] + if self.mask_info_flow: + mask_pred, last_feat = mask_head(mask_feats, last_feat) + else: + mask_pred = mask_head(mask_feats) + aug_masks.append(mask_pred.sigmoid().cpu().numpy()) + merged_masks = merge_aug_masks(aug_masks, + [img_meta] * self.num_stages, + self.test_cfg.rcnn) + segm_result = self.mask_head[-1].get_seg_masks( + merged_masks, _bboxes, det_labels, rcnn_test_cfg, + ori_shape, scale_factor, rescale) + ms_segm_result['ensemble'] = segm_result + + if not self.test_cfg.keep_all_stages: + if self.with_mask: + results = (ms_bbox_result['ensemble'], + ms_segm_result['ensemble']) + else: + results = ms_bbox_result['ensemble'] + else: + if self.with_mask: + results = { + stage: (ms_bbox_result[stage], ms_segm_result[stage]) + for stage in ms_bbox_result + } + else: + results = ms_bbox_result + + return results + + def aug_test(self, img, img_meta, proposals=None, rescale=False): + raise NotImplementedError diff --git a/mmdet/models/detectors/mask_rcnn.py b/mmdet/models/detectors/mask_rcnn.py new file mode 100644 index 0000000..003e87f --- /dev/null +++ b/mmdet/models/detectors/mask_rcnn.py @@ -0,0 +1,31 @@ +from .two_stage import TwoStageDetector +from ..registry import DETECTORS + + +@DETECTORS.register_module +class MaskRCNN(TwoStageDetector): + + def __init__(self, + backbone, + rpn_head, + bbox_roi_extractor, + bbox_head, + mask_roi_extractor, + mask_head, + train_cfg, + test_cfg, + neck=None, + shared_head=None, + pretrained=None): + super(MaskRCNN, self).__init__( + backbone=backbone, + neck=neck, + shared_head=shared_head, + rpn_head=rpn_head, + bbox_roi_extractor=bbox_roi_extractor, + bbox_head=bbox_head, + mask_roi_extractor=mask_roi_extractor, + mask_head=mask_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + pretrained=pretrained) diff --git a/mmdet/models/detectors/retinanet.py b/mmdet/models/detectors/retinanet.py new file mode 100644 index 0000000..0e5b6fd --- /dev/null +++ b/mmdet/models/detectors/retinanet.py @@ -0,0 +1,16 @@ +from .single_stage import SingleStageDetector +from ..registry import DETECTORS + + +@DETECTORS.register_module +class RetinaNet(SingleStageDetector): + + def __init__(self, + backbone, + neck, + bbox_head, + train_cfg=None, + test_cfg=None, + pretrained=None): + super(RetinaNet, self).__init__(backbone, neck, bbox_head, train_cfg, + test_cfg, pretrained) diff --git a/mmdet/models/detectors/rpn.py b/mmdet/models/detectors/rpn.py new file mode 100644 index 0000000..51043af --- /dev/null +++ b/mmdet/models/detectors/rpn.py @@ -0,0 +1,92 @@ +import mmcv + +from mmdet.core import tensor2imgs, bbox_mapping +from .base import BaseDetector +from .test_mixins import RPNTestMixin +from .. 
import builder +from ..registry import DETECTORS + + +@DETECTORS.register_module +class RPN(BaseDetector, RPNTestMixin): + + def __init__(self, + backbone, + neck, + rpn_head, + train_cfg, + test_cfg, + pretrained=None): + super(RPN, self).__init__() + self.backbone = builder.build_backbone(backbone) + self.neck = builder.build_neck(neck) if neck is not None else None + self.rpn_head = builder.build_head(rpn_head) + self.train_cfg = train_cfg + self.test_cfg = test_cfg + self.init_weights(pretrained=pretrained) + + def init_weights(self, pretrained=None): + super(RPN, self).init_weights(pretrained) + self.backbone.init_weights(pretrained=pretrained) + if self.with_neck: + self.neck.init_weights() + self.rpn_head.init_weights() + + def extract_feat(self, img): + x = self.backbone(img) + if self.with_neck: + x = self.neck(x) + return x + + def forward_train(self, + img, + img_meta, + gt_bboxes=None, + gt_bboxes_ignore=None): + if self.train_cfg.rpn.get('debug', False): + self.rpn_head.debug_imgs = tensor2imgs(img) + + x = self.extract_feat(img) + rpn_outs = self.rpn_head(x) + + rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta, self.train_cfg.rpn) + losses = self.rpn_head.loss( + *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) + return losses + + def simple_test(self, img, img_meta, rescale=False): + x = self.extract_feat(img) + proposal_list = self.simple_test_rpn(x, img_meta, self.test_cfg.rpn) + if rescale: + for proposals, meta in zip(proposal_list, img_meta): + proposals[:, :4] /= meta['scale_factor'] + # TODO: remove this restriction + return proposal_list[0].cpu().numpy() + + def aug_test(self, imgs, img_metas, rescale=False): + proposal_list = self.aug_test_rpn( + self.extract_feats(imgs), img_metas, self.test_cfg.rpn) + if not rescale: + for proposals, img_meta in zip(proposal_list, img_metas[0]): + img_shape = img_meta['img_shape'] + scale_factor = img_meta['scale_factor'] + flip = img_meta['flip'] + proposals[:, :4] = bbox_mapping(proposals[:, :4], img_shape, + scale_factor, flip) + # TODO: remove this restriction + return proposal_list[0].cpu().numpy() + + def show_result(self, data, result, img_norm_cfg, dataset=None, top_k=20): + """Show RPN proposals on the image. + + Although we assume batch size is 1, this method supports arbitrary + batch size. + """ + img_tensor = data['img'][0] + img_metas = data['img_meta'][0].data[0] + imgs = tensor2imgs(img_tensor, **img_norm_cfg) + assert len(imgs) == len(img_metas) + for img, img_meta in zip(imgs, img_metas): + h, w, _ = img_meta['img_shape'] + img_show = img[:h, :w, :] + mmcv.imshow_bboxes(img_show, result, top_k=top_k) diff --git a/mmdet/models/detectors/single_stage.py b/mmdet/models/detectors/single_stage.py new file mode 100644 index 0000000..6f73b34 --- /dev/null +++ b/mmdet/models/detectors/single_stage.py @@ -0,0 +1,70 @@ +import torch.nn as nn + +from .base import BaseDetector +from .. 
import builder +from ..registry import DETECTORS +from mmdet.core import bbox2result + + +@DETECTORS.register_module +class SingleStageDetector(BaseDetector): + + def __init__(self, + backbone, + neck=None, + bbox_head=None, + train_cfg=None, + test_cfg=None, + pretrained=None): + super(SingleStageDetector, self).__init__() + self.backbone = builder.build_backbone(backbone) + if neck is not None: + self.neck = builder.build_neck(neck) + self.bbox_head = builder.build_head(bbox_head) + self.train_cfg = train_cfg + self.test_cfg = test_cfg + self.init_weights(pretrained=pretrained) + + def init_weights(self, pretrained=None): + super(SingleStageDetector, self).init_weights(pretrained) + self.backbone.init_weights(pretrained=pretrained) + if self.with_neck: + if isinstance(self.neck, nn.Sequential): + for m in self.neck: + m.init_weights() + else: + self.neck.init_weights() + self.bbox_head.init_weights() + + def extract_feat(self, img): + x = self.backbone(img) + if self.with_neck: + x = self.neck(x) + return x + + def forward_train(self, + img, + img_metas, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None): + x = self.extract_feat(img) + outs = self.bbox_head(x) + loss_inputs = outs + (gt_bboxes, gt_labels, img_metas, self.train_cfg) + losses = self.bbox_head.loss( + *loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) + return losses + + def simple_test(self, img, img_meta, rescale=False): + x = self.extract_feat(img) + outs = self.bbox_head(x) + bbox_inputs = outs + (img_meta, self.test_cfg, rescale) + bbox_list = self.bbox_head.get_bboxes(*bbox_inputs) + bbox_results = [ + bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes) + for det_bboxes, det_labels in bbox_list + ] + return bbox_results[0] + + def aug_test(self, imgs, img_metas, rescale=False): + raise NotImplementedError diff --git a/mmdet/models/detectors/test_mixins.py b/mmdet/models/detectors/test_mixins.py new file mode 100644 index 0000000..b64f2cd --- /dev/null +++ b/mmdet/models/detectors/test_mixins.py @@ -0,0 +1,153 @@ +from mmdet.core import (bbox2roi, bbox_mapping, merge_aug_proposals, + merge_aug_bboxes, merge_aug_masks, multiclass_nms) + + +class RPNTestMixin(object): + + def simple_test_rpn(self, x, img_meta, rpn_test_cfg): + rpn_outs = self.rpn_head(x) + proposal_inputs = rpn_outs + (img_meta, rpn_test_cfg) + proposal_list = self.rpn_head.get_bboxes(*proposal_inputs) + return proposal_list + + def aug_test_rpn(self, feats, img_metas, rpn_test_cfg): + imgs_per_gpu = len(img_metas[0]) + aug_proposals = [[] for _ in range(imgs_per_gpu)] + for x, img_meta in zip(feats, img_metas): + proposal_list = self.simple_test_rpn(x, img_meta, rpn_test_cfg) + for i, proposals in enumerate(proposal_list): + aug_proposals[i].append(proposals) + # after merging, proposals will be rescaled to the original image size + merged_proposals = [ + merge_aug_proposals(proposals, img_meta, rpn_test_cfg) + for proposals, img_meta in zip(aug_proposals, img_metas) + ] + return merged_proposals + + +class BBoxTestMixin(object): + + def simple_test_bboxes(self, + x, + img_meta, + proposals, + rcnn_test_cfg, + rescale=False): + """Test only det bboxes without augmentation.""" + rois = bbox2roi(proposals) + roi_feats = self.bbox_roi_extractor( + x[:len(self.bbox_roi_extractor.featmap_strides)], rois) + if self.with_shared_head: + roi_feats = self.shared_head(roi_feats) + cls_score, bbox_pred = self.bbox_head(roi_feats) + img_shape = img_meta[0]['img_shape'] + scale_factor = img_meta[0]['scale_factor'] + det_bboxes, det_labels = 
self.bbox_head.get_det_bboxes( + rois, + cls_score, + bbox_pred, + img_shape, + scale_factor, + rescale=rescale, + cfg=rcnn_test_cfg) + return det_bboxes, det_labels + + def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg): + aug_bboxes = [] + aug_scores = [] + for x, img_meta in zip(feats, img_metas): + # only one image in the batch + img_shape = img_meta[0]['img_shape'] + scale_factor = img_meta[0]['scale_factor'] + flip = img_meta[0]['flip'] + # TODO more flexible + proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, + scale_factor, flip) + rois = bbox2roi([proposals]) + # recompute feature maps to save GPU memory + roi_feats = self.bbox_roi_extractor( + x[:len(self.bbox_roi_extractor.featmap_strides)], rois) + if self.with_shared_head: + roi_feats = self.shared_head(roi_feats) + cls_score, bbox_pred = self.bbox_head(roi_feats) + bboxes, scores = self.bbox_head.get_det_bboxes( + rois, + cls_score, + bbox_pred, + img_shape, + scale_factor, + rescale=False, + cfg=None) + aug_bboxes.append(bboxes) + aug_scores.append(scores) + # after merging, bboxes will be rescaled to the original image size + merged_bboxes, merged_scores = merge_aug_bboxes( + aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) + det_bboxes, det_labels = multiclass_nms( + merged_bboxes, merged_scores, rcnn_test_cfg.score_thr, + rcnn_test_cfg.nms, rcnn_test_cfg.max_per_img) + return det_bboxes, det_labels + + +class MaskTestMixin(object): + + def simple_test_mask(self, + x, + img_meta, + det_bboxes, + det_labels, + rescale=False): + # image shape of the first image in the batch (only one) + ori_shape = img_meta[0]['ori_shape'] + scale_factor = img_meta[0]['scale_factor'] + if det_bboxes.shape[0] == 0: + segm_result = [[] for _ in range(self.mask_head.num_classes - 1)] + else: + # if det_bboxes is rescaled to the original image size, we need to + # rescale it back to the testing scale to obtain RoIs. 
+ _bboxes = ( + det_bboxes[:, :4] * scale_factor if rescale else det_bboxes) + mask_rois = bbox2roi([_bboxes]) + mask_feats = self.mask_roi_extractor( + x[:len(self.mask_roi_extractor.featmap_strides)], mask_rois) + if self.with_shared_head: + mask_feats = self.shared_head(mask_feats) + mask_pred = self.mask_head(mask_feats) + segm_result = self.mask_head.get_seg_masks( + mask_pred, _bboxes, det_labels, self.test_cfg.rcnn, ori_shape, + scale_factor, rescale) + return segm_result + + def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels): + if det_bboxes.shape[0] == 0: + segm_result = [[] for _ in range(self.mask_head.num_classes - 1)] + else: + aug_masks = [] + for x, img_meta in zip(feats, img_metas): + img_shape = img_meta[0]['img_shape'] + scale_factor = img_meta[0]['scale_factor'] + flip = img_meta[0]['flip'] + _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, + scale_factor, flip) + mask_rois = bbox2roi([_bboxes]) + mask_feats = self.mask_roi_extractor( + x[:len(self.mask_roi_extractor.featmap_strides)], + mask_rois) + if self.with_shared_head: + mask_feats = self.shared_head(mask_feats) + mask_pred = self.mask_head(mask_feats) + # convert to numpy array to save memory + aug_masks.append(mask_pred.sigmoid().cpu().numpy()) + merged_masks = merge_aug_masks(aug_masks, img_metas, + self.test_cfg.rcnn) + + ori_shape = img_metas[0][0]['ori_shape'] + segm_result = self.mask_head.get_seg_masks( + merged_masks, + det_bboxes, + det_labels, + self.test_cfg.rcnn, + ori_shape, + scale_factor=1.0, + rescale=False) + return segm_result diff --git a/mmdet/models/detectors/two_stage.py b/mmdet/models/detectors/two_stage.py new file mode 100644 index 0000000..c300476 --- /dev/null +++ b/mmdet/models/detectors/two_stage.py @@ -0,0 +1,240 @@ +import torch +import torch.nn as nn + +from .base import BaseDetector +from .test_mixins import RPNTestMixin, BBoxTestMixin, MaskTestMixin +from .. 
import builder +from ..registry import DETECTORS +from mmdet.core import bbox2roi, bbox2result, build_assigner, build_sampler + + +@DETECTORS.register_module +class TwoStageDetector(BaseDetector, RPNTestMixin, BBoxTestMixin, + MaskTestMixin): + + def __init__(self, + backbone, + neck=None, + shared_head=None, + rpn_head=None, + bbox_roi_extractor=None, + bbox_head=None, + mask_roi_extractor=None, + mask_head=None, + train_cfg=None, + test_cfg=None, + pretrained=None): + super(TwoStageDetector, self).__init__() + self.backbone = builder.build_backbone(backbone) + + if neck is not None: + self.neck = builder.build_neck(neck) + + if shared_head is not None: + self.shared_head = builder.build_shared_head(shared_head) + + if rpn_head is not None: + self.rpn_head = builder.build_head(rpn_head) + + if bbox_head is not None: + self.bbox_roi_extractor = builder.build_roi_extractor( + bbox_roi_extractor) + self.bbox_head = builder.build_head(bbox_head) + + if mask_head is not None: + if mask_roi_extractor is not None: + self.mask_roi_extractor = builder.build_roi_extractor( + mask_roi_extractor) + self.share_roi_extractor = False + else: + self.share_roi_extractor = True + self.mask_roi_extractor = self.bbox_roi_extractor + self.mask_head = builder.build_head(mask_head) + + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + self.init_weights(pretrained=pretrained) + + @property + def with_rpn(self): + return hasattr(self, 'rpn_head') and self.rpn_head is not None + + def init_weights(self, pretrained=None): + super(TwoStageDetector, self).init_weights(pretrained) + self.backbone.init_weights(pretrained=pretrained) + if self.with_neck: + if isinstance(self.neck, nn.Sequential): + for m in self.neck: + m.init_weights() + else: + self.neck.init_weights() + if self.with_shared_head: + self.shared_head.init_weights(pretrained=pretrained) + if self.with_rpn: + self.rpn_head.init_weights() + if self.with_bbox: + self.bbox_roi_extractor.init_weights() + self.bbox_head.init_weights() + if self.with_mask: + self.mask_head.init_weights() + if not self.share_roi_extractor: + self.mask_roi_extractor.init_weights() + + def extract_feat(self, img): + x = self.backbone(img) + if self.with_neck: + x = self.neck(x) + return x + + def forward_train(self, + img, + img_meta, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None, + gt_masks=None, + proposals=None): + x = self.extract_feat(img) + + losses = dict() + + # RPN forward and loss + if self.with_rpn: + rpn_outs = self.rpn_head(x) + rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta, + self.train_cfg.rpn) + rpn_losses = self.rpn_head.loss( + *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) + losses.update(rpn_losses) + + proposal_cfg = self.train_cfg.get('rpn_proposal', + self.test_cfg.rpn) + proposal_inputs = rpn_outs + (img_meta, proposal_cfg) + proposal_list = self.rpn_head.get_bboxes(*proposal_inputs) + else: + proposal_list = proposals + + # assign gts and sample proposals + if self.with_bbox or self.with_mask: + bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner) + bbox_sampler = build_sampler( + self.train_cfg.rcnn.sampler, context=self) + num_imgs = img.size(0) + if gt_bboxes_ignore is None: + gt_bboxes_ignore = [None for _ in range(num_imgs)] + sampling_results = [] + for i in range(num_imgs): + assign_result = bbox_assigner.assign( + proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], + gt_labels[i]) + sampling_result = bbox_sampler.sample( + assign_result, + proposal_list[i], + gt_bboxes[i], + gt_labels[i], + feats=[lvl_feat[i][None] 
for lvl_feat in x]) + sampling_results.append(sampling_result) + + # bbox head forward and loss + if self.with_bbox: + rois = bbox2roi([res.bboxes for res in sampling_results]) + # TODO: a more flexible way to decide which feature maps to use + bbox_feats = self.bbox_roi_extractor( + x[:self.bbox_roi_extractor.num_inputs], rois) + if self.with_shared_head: + bbox_feats = self.shared_head(bbox_feats) + cls_score, bbox_pred = self.bbox_head(bbox_feats) + + bbox_targets = self.bbox_head.get_target( + sampling_results, gt_bboxes, gt_labels, self.train_cfg.rcnn) + loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, + *bbox_targets) + losses.update(loss_bbox) + + # mask head forward and loss + if self.with_mask: + if not self.share_roi_extractor: + pos_rois = bbox2roi( + [res.pos_bboxes for res in sampling_results]) + mask_feats = self.mask_roi_extractor( + x[:self.mask_roi_extractor.num_inputs], pos_rois) + if self.with_shared_head: + mask_feats = self.shared_head(mask_feats) + else: + pos_inds = [] + device = bbox_feats.device + for res in sampling_results: + pos_inds.append( + torch.ones( + res.pos_bboxes.shape[0], + device=device, + dtype=torch.uint8)) + pos_inds.append( + torch.zeros( + res.neg_bboxes.shape[0], + device=device, + dtype=torch.uint8)) + pos_inds = torch.cat(pos_inds) + mask_feats = bbox_feats[pos_inds] + mask_pred = self.mask_head(mask_feats) + + mask_targets = self.mask_head.get_target( + sampling_results, gt_masks, self.train_cfg.rcnn) + pos_labels = torch.cat( + [res.pos_gt_labels for res in sampling_results]) + loss_mask = self.mask_head.loss(mask_pred, mask_targets, + pos_labels) + losses.update(loss_mask) + + return losses + + def simple_test(self, img, img_meta, proposals=None, rescale=False): + """Test without augmentation.""" + assert self.with_bbox, "Bbox head must be implemented." + + x = self.extract_feat(img) + + proposal_list = self.simple_test_rpn( + x, img_meta, self.test_cfg.rpn) if proposals is None else proposals + + det_bboxes, det_labels = self.simple_test_bboxes( + x, img_meta, proposal_list, self.test_cfg.rcnn, rescale=rescale) + bbox_results = bbox2result(det_bboxes, det_labels, + self.bbox_head.num_classes) + + if not self.with_mask: + return bbox_results + else: + segm_results = self.simple_test_mask( + x, img_meta, det_bboxes, det_labels, rescale=rescale) + return bbox_results, segm_results + + def aug_test(self, imgs, img_metas, rescale=False): + """Test with augmentations. + + If rescale is False, then returned bboxes and masks will fit the scale + of imgs[0]. 
+ """ + # recompute feats to save memory + proposal_list = self.aug_test_rpn( + self.extract_feats(imgs), img_metas, self.test_cfg.rpn) + det_bboxes, det_labels = self.aug_test_bboxes( + self.extract_feats(imgs), img_metas, proposal_list, + self.test_cfg.rcnn) + + if rescale: + _det_bboxes = det_bboxes + else: + _det_bboxes = det_bboxes.clone() + _det_bboxes[:, :4] *= img_metas[0][0]['scale_factor'] + bbox_results = bbox2result(_det_bboxes, det_labels, + self.bbox_head.num_classes) + + # det_bboxes always keep the original scale + if self.with_mask: + segm_results = self.aug_test_mask( + self.extract_feats(imgs), img_metas, det_bboxes, det_labels) + return bbox_results, segm_results + else: + return bbox_results diff --git a/mmdet/models/mask_heads/__init__.py b/mmdet/models/mask_heads/__init__.py new file mode 100644 index 0000000..995d67d --- /dev/null +++ b/mmdet/models/mask_heads/__init__.py @@ -0,0 +1,6 @@ +from .fcn_mask_head import FCNMaskHead +from .htc_mask_head import HTCMaskHead +from .fused_semantic_head import FusedSemanticHead +from .grid_head import GridHead + +__all__ = ['FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead'] diff --git a/mmdet/models/mask_heads/fcn_mask_head.py b/mmdet/models/mask_heads/fcn_mask_head.py new file mode 100644 index 0000000..0a5054c --- /dev/null +++ b/mmdet/models/mask_heads/fcn_mask_head.py @@ -0,0 +1,169 @@ +import mmcv +import numpy as np +import pycocotools.mask as mask_util +import torch +import torch.nn as nn + +from ..registry import HEADS +from ..utils import ConvModule +from mmdet.core import mask_cross_entropy, mask_target + + +@HEADS.register_module +class FCNMaskHead(nn.Module): + + def __init__(self, + num_convs=4, + roi_feat_size=14, + in_channels=256, + conv_kernel_size=3, + conv_out_channels=256, + upsample_method='deconv', + upsample_ratio=2, + num_classes=81, + class_agnostic=False, + conv_cfg=None, + norm_cfg=None): + super(FCNMaskHead, self).__init__() + if upsample_method not in [None, 'deconv', 'nearest', 'bilinear']: + raise ValueError( + 'Invalid upsample method {}, accepted methods ' + 'are "deconv", "nearest", "bilinear"'.format(upsample_method)) + self.num_convs = num_convs + self.roi_feat_size = roi_feat_size # WARN: not used and reserved + self.in_channels = in_channels + self.conv_kernel_size = conv_kernel_size + self.conv_out_channels = conv_out_channels + self.upsample_method = upsample_method + self.upsample_ratio = upsample_ratio + self.num_classes = num_classes + self.class_agnostic = class_agnostic + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + + self.convs = nn.ModuleList() + for i in range(self.num_convs): + in_channels = ( + self.in_channels if i == 0 else self.conv_out_channels) + padding = (self.conv_kernel_size - 1) // 2 + self.convs.append( + ConvModule( + in_channels, + self.conv_out_channels, + self.conv_kernel_size, + padding=padding, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg)) + upsample_in_channels = ( + self.conv_out_channels if self.num_convs > 0 else in_channels) + if self.upsample_method is None: + self.upsample = None + elif self.upsample_method == 'deconv': + self.upsample = nn.ConvTranspose2d( + upsample_in_channels, + self.conv_out_channels, + self.upsample_ratio, + stride=self.upsample_ratio) + else: + self.upsample = nn.Upsample( + scale_factor=self.upsample_ratio, mode=self.upsample_method) + + out_channels = 1 if self.class_agnostic else self.num_classes + logits_in_channel = ( + self.conv_out_channels + if self.upsample_method == 'deconv' else upsample_in_channels) + 
self.conv_logits = nn.Conv2d(logits_in_channel, out_channels, 1) + self.relu = nn.ReLU(inplace=True) + self.debug_imgs = None + + def init_weights(self): + for m in [self.upsample, self.conv_logits]: + if m is None: + continue + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu') + nn.init.constant_(m.bias, 0) + + def forward(self, x): + for conv in self.convs: + x = conv(x) + if self.upsample is not None: + x = self.upsample(x) + if self.upsample_method == 'deconv': + x = self.relu(x) + mask_pred = self.conv_logits(x) + return mask_pred + + def get_target(self, sampling_results, gt_masks, rcnn_train_cfg): + pos_proposals = [res.pos_bboxes for res in sampling_results] + pos_assigned_gt_inds = [ + res.pos_assigned_gt_inds for res in sampling_results + ] + mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds, + gt_masks, rcnn_train_cfg) + return mask_targets + + def loss(self, mask_pred, mask_targets, labels): + loss = dict() + if self.class_agnostic: + loss_mask = mask_cross_entropy(mask_pred, mask_targets, + torch.zeros_like(labels)) + else: + loss_mask = mask_cross_entropy(mask_pred, mask_targets, labels) + loss['loss_mask'] = loss_mask + return loss + + def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg, + ori_shape, scale_factor, rescale): + """Get segmentation masks from mask_pred and bboxes. + + Args: + mask_pred (Tensor or ndarray): shape (n, #class+1, h, w). + For single-scale testing, mask_pred is the direct output of + model, whose type is Tensor, while for multi-scale testing, + it will be converted to numpy array outside of this method. + det_bboxes (Tensor): shape (n, 4/5) + det_labels (Tensor): shape (n, ) + img_shape (Tensor): shape (3, ) + rcnn_test_cfg (dict): rcnn testing config + ori_shape: original image size + + Returns: + list[list]: encoded masks + """ + if isinstance(mask_pred, torch.Tensor): + mask_pred = mask_pred.sigmoid().cpu().numpy() + assert isinstance(mask_pred, np.ndarray) + + cls_segms = [[] for _ in range(self.num_classes - 1)] + bboxes = det_bboxes.cpu().numpy()[:, :4] + labels = det_labels.cpu().numpy() + 1 + + if rescale: + img_h, img_w = ori_shape[:2] + else: + img_h = np.round(ori_shape[0] * scale_factor).astype(np.int32) + img_w = np.round(ori_shape[1] * scale_factor).astype(np.int32) + scale_factor = 1.0 + + for i in range(bboxes.shape[0]): + bbox = (bboxes[i, :] / scale_factor).astype(np.int32) + label = labels[i] + w = max(bbox[2] - bbox[0] + 1, 1) + h = max(bbox[3] - bbox[1] + 1, 1) + + if not self.class_agnostic: + mask_pred_ = mask_pred[i, label, :, :] + else: + mask_pred_ = mask_pred[i, 0, :, :] + im_mask = np.zeros((img_h, img_w), dtype=np.uint8) + + bbox_mask = mmcv.imresize(mask_pred_, (w, h)) + bbox_mask = (bbox_mask > rcnn_test_cfg.mask_thr_binary).astype( + np.uint8) + im_mask[bbox[1]:bbox[1] + h, bbox[0]:bbox[0] + w] = bbox_mask + rle = mask_util.encode( + np.array(im_mask[:, :, np.newaxis], order='F'))[0] + cls_segms[label - 1].append(rle) + + return cls_segms diff --git a/mmdet/models/mask_heads/fused_semantic_head.py b/mmdet/models/mask_heads/fused_semantic_head.py new file mode 100644 index 0000000..6107423 --- /dev/null +++ b/mmdet/models/mask_heads/fused_semantic_head.py @@ -0,0 +1,102 @@ +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import kaiming_init + +from ..registry import HEADS +from ..utils import ConvModule + + +@HEADS.register_module +class FusedSemanticHead(nn.Module): + """Multi-level fused semantic segmentation head. 
+ + in_1 -> 1x1 conv --- + | + in_2 -> 1x1 conv -- | + || + in_3 -> 1x1 conv - || + ||| /-> 1x1 conv (mask prediction) + in_4 -> 1x1 conv -----> 3x3 convs (*4) + | \-> 1x1 conv (feature) + in_5 -> 1x1 conv --- + """ # noqa: W605 + + def __init__(self, + num_ins, + fusion_level, + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=183, + ignore_label=255, + loss_weight=0.2, + conv_cfg=None, + norm_cfg=None): + super(FusedSemanticHead, self).__init__() + self.num_ins = num_ins + self.fusion_level = fusion_level + self.num_convs = num_convs + self.in_channels = in_channels + self.conv_out_channels = conv_out_channels + self.num_classes = num_classes + self.ignore_label = ignore_label + self.loss_weight = loss_weight + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + + self.lateral_convs = nn.ModuleList() + for i in range(self.num_ins): + self.lateral_convs.append( + ConvModule( + self.in_channels, + self.in_channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=False)) + + self.convs = nn.ModuleList() + for i in range(self.num_convs): + in_channels = self.in_channels if i == 0 else conv_out_channels + self.convs.append( + ConvModule( + in_channels, + conv_out_channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg)) + self.conv_embedding = ConvModule( + conv_out_channels, + conv_out_channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg) + self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1) + + self.criterion = nn.CrossEntropyLoss(ignore_index=ignore_label) + + def init_weights(self): + kaiming_init(self.conv_logits) + + def forward(self, feats): + x = self.lateral_convs[self.fusion_level](feats[self.fusion_level]) + fused_size = tuple(x.shape[-2:]) + for i, feat in enumerate(feats): + if i != self.fusion_level: + feat = F.interpolate( + feat, size=fused_size, mode='bilinear', align_corners=True) + x += self.lateral_convs[i](feat) + + for i in range(self.num_convs): + x = self.convs[i](x) + + mask_pred = self.conv_logits(x) + x = self.conv_embedding(x) + return mask_pred, x + + def loss(self, mask_pred, labels): + labels = labels.squeeze(1).long() + loss_semantic_seg = self.criterion(mask_pred, labels) + loss_semantic_seg *= self.loss_weight + return loss_semantic_seg diff --git a/mmdet/models/mask_heads/grid_head.py b/mmdet/models/mask_heads/grid_head.py new file mode 100644 index 0000000..642243d --- /dev/null +++ b/mmdet/models/mask_heads/grid_head.py @@ -0,0 +1,174 @@ +import mmcv +import numpy as np +import pycocotools.mask as mask_util +import torch +import torch.nn as nn +import functools +import torch.nn.functional as F +from torch.distributed import get_world_size, get_rank +import random + +from ..registry import HEADS +from ..utils import ConvModule +from ..utils import build_norm_layer +from mmdet.core import grid_target, random_jitter, bbox2roi + +@HEADS.register_module +class GridHead(nn.Module): + + def __init__(self, + num_convs=8, + roi_feat_size=14, + in_channels=256, + conv_kernel_size=3, + conv_out_channels=256, + deconv_kernel = 4, + num_grids = 9, + class_agnostic=False, + conv_cfg=None, + norm_cfg=None): + super(GridHead, self).__init__() + self.num_convs = num_convs + self.roi_feat_size = roi_feat_size # WARN: not used and reserved + self.in_channels = in_channels + self.conv_kernel_size = conv_kernel_size + self.conv_out_channels = conv_out_channels + self.class_agnostic = class_agnostic + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.num_grids = num_grids + 
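+        # test_mode is switched on by GridRCNN.simple_test so that forward()
+        # skips the extra unfused branch and returns the fused grid
+        # prediction for both outputs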
self.test_mode = False + + self.convs = [] + for i in range(self.num_convs): + in_channels = ( + self.in_channels if i == 0 else self.conv_out_channels) + strides = 2 if i==0 else 1 + padding = (self.conv_kernel_size - 1) // 2 + self.convs.append( + nn.Sequential(nn.Conv2d(in_channels,self.conv_out_channels,self.conv_kernel_size,strides,padding), + nn.GroupNorm(36,self.conv_out_channels), + nn.ReLU(inplace=True))) + self.convs = nn.Sequential(*self.convs) + + planes = self.conv_out_channels + self.single_plane = self.conv_out_channels // num_grids + + self.updeconv1 = nn.ConvTranspose2d(planes,planes,kernel_size=deconv_kernel,stride=2, + padding=(deconv_kernel - 2) // 2, groups=num_grids) + self.sbn1 = nn.GroupNorm(num_grids,planes) + self.updeconv2 = nn.ConvTranspose2d(planes, num_grids, kernel_size=deconv_kernel, stride=2, + padding=(deconv_kernel - 2) // 2, groups=num_grids) + + self.neighborpoint = ((1, 3), (0, 2, 4), (1, 5), (0, 4, 6), (1, 3, 5, 7), (2, 4, 8), (3, 7), (4, 6, 8), (5, 7)) + self.num_edges = functools.reduce(lambda x, y: x + y, map(lambda x: len(x), self.neighborpoint)) + self.firstOrderConvs = [] + self.secondOrderConvs = [] + for _point in self.neighborpoint: + _foc = [nn.Sequential(nn.Conv2d(self.single_plane, self.single_plane, 5, 1, 2,groups=self.single_plane), + nn.Conv2d(self.single_plane,self.single_plane,1,1,0)) for _idx in range(len(_point))] + _soc = [nn.Sequential(nn.Conv2d(self.single_plane, self.single_plane, 5, 1, 2,groups=self.single_plane), + nn.Conv2d(self.single_plane,self.single_plane,1,1,0)) for _idx in range(len(_point))] + self.firstOrderConvs.append(nn.Sequential(*_foc)) + self.secondOrderConvs.append(nn.Sequential(*_soc)) + + self.firstOrderConvs = nn.Sequential(*self.firstOrderConvs) + self.secondOrderConvs = nn.Sequential(*self.secondOrderConvs) + + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): + nn.init.kaiming_normal_(m.weight.data) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + for m in self.modules(): + if isinstance(m, nn.ConvTranspose2d): + nn.init.normal_(m.weight.data, std=0.001) + if m.bias is not None: + m.bias.data.zero_() + nn.init.constant_(self.updeconv2.bias,-np.log(0.99/0.01)) + + def forward(self, x): + x = self.convs(x) + + first_order_x = [None] * self.num_grids + for _idx, _point_idx in enumerate(self.neighborpoint): + first_order_x[_idx] = x[:, _idx * self.single_plane:(_idx + 1) * self.single_plane] + for _iidx, _neighbor_idx in enumerate(_point_idx): + first_order_x[_idx] = first_order_x[_idx] + self.firstOrderConvs[_idx][_iidx]( + x[:, _neighbor_idx * self.single_plane:(_neighbor_idx + 1) * self.single_plane]) + + second_order_x = [None] * self.num_grids + for _idx, _point_idx in enumerate(self.neighborpoint): + second_order_x[_idx] = x[:, _idx * self.single_plane:(_idx + 1) * self.single_plane] + for _iidx, _neighbor_idx in enumerate(_point_idx): + second_order_x[_idx] = second_order_x[_idx] + self.secondOrderConvs[_idx][_iidx]( + first_order_x[_neighbor_idx]) + + x2 = torch.cat(second_order_x, dim=1) + x2 = self.updeconv1(x2) + x2 = nn.functional.relu(self.sbn1(x2),inplace=True) + x2 = self.updeconv2(x2) + + if not self.test_mode: + x1 = x + x1 = self.updeconv1(x1) + x1 = nn.functional.relu(self.sbn1(x1),inplace=True) + x1 = self.updeconv2(x1) + else: + x1 = x2 + + return x1, x2 + + def get_target(self, sampling_results, rcnn_train_cfg): + grid_targets = grid_target(sampling_results,rcnn_train_cfg) + return grid_targets + + def loss(self, 
grid_pred1, grid_pred2, grid_targets): + loss = dict() + grid_loss = F.binary_cross_entropy_with_logits(grid_pred1, grid_targets) + F.binary_cross_entropy_with_logits(grid_pred2,grid_targets) + grid_loss = grid_loss * 15 + + loss['loss_grid'] = grid_loss + return loss + + def get_bboxes(self, det_bboxes, grid_pred, img_meta): + assert(det_bboxes.shape[0] == grid_pred.shape[0]) + det_bboxes = det_bboxes.cpu() + cls_scores = det_bboxes[:,[4]] + det_bboxes = det_bboxes[:,:4] + grid_pred = torch.sigmoid(grid_pred).cpu() + + #expand pos_bboxes + widths = det_bboxes[:,2] - det_bboxes[:,0] + heights = det_bboxes[:,3] - det_bboxes[:,1] + x1 = det_bboxes[:,0] - widths / 2 + y1 = det_bboxes[:,1] - heights / 2 + x2 = det_bboxes[:,2] + widths / 2 + y2 = det_bboxes[:,3] + heights / 2 + + R,C,H,W = grid_pred.shape + grid_pred = grid_pred.view(R*C,H*W) + pred_scores,pred_position = grid_pred.max(dim=1) + + xs = pred_position % W + ys = pred_position // W + base = (0,14,28) + for i in range(9): + xs[i::9] = xs[i::9] + base[i//3] + ys[i::9] = ys[i::9] + base[i%3] + pred_scores,xs,ys = tuple(map(lambda x:x.view(R,C),[pred_scores,xs,ys])) + + grid_points = (xs.float() + 0.5) / (2*W) * widths.view(-1,1) * 2 + x1.view(-1,1), \ + (ys.float() + 0.5) / (2*H) * heights.view(-1,1) * 2 + y1.view(-1,1) + + res_dets_x1 = (grid_points[0][:,[0,1,2]] * pred_scores[:,[0,1,2]]).sum(dim=1,keepdim=True) / (pred_scores[:,[0,1,2]].sum(dim=1,keepdim=True)) + res_dets_y1 = (grid_points[1][:,[0,3,6]] * pred_scores[:,[0,3,6]]).sum(dim=1,keepdim=True) / (pred_scores[:,[0,3,6]].sum(dim=1,keepdim=True)) + res_dets_x2 = (grid_points[0][:,[6,7,8]] * pred_scores[:,[6,7,8]]).sum(dim=1,keepdim=True) / (pred_scores[:,[6,7,8]].sum(dim=1,keepdim=True)) + res_dets_y2 = (grid_points[1][:,[2,5,8]] * pred_scores[:,[2,5,8]]).sum(dim=1,keepdim=True) / (pred_scores[:,[2,5,8]].sum(dim=1,keepdim=True)) + + det_res = torch.cat([res_dets_x1,res_dets_y1,res_dets_x2,res_dets_y2,cls_scores],dim=1) + det_res[:,[0,2]].clamp_(min=0,max=img_meta[0]['img_shape'][1]-1) + det_res[:,[1,3]].clamp_(min=0,max=img_meta[0]['img_shape'][0]-1) + + return det_res diff --git a/mmdet/models/mask_heads/htc_mask_head.py b/mmdet/models/mask_heads/htc_mask_head.py new file mode 100644 index 0000000..9ba3ed7 --- /dev/null +++ b/mmdet/models/mask_heads/htc_mask_head.py @@ -0,0 +1,38 @@ +from .fcn_mask_head import FCNMaskHead +from ..registry import HEADS +from ..utils import ConvModule + + +@HEADS.register_module +class HTCMaskHead(FCNMaskHead): + + def __init__(self, *args, **kwargs): + super(HTCMaskHead, self).__init__(*args, **kwargs) + self.conv_res = ConvModule( + self.conv_out_channels, + self.conv_out_channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg) + + def init_weights(self): + super(HTCMaskHead, self).init_weights() + self.conv_res.init_weights() + + def forward(self, x, res_feat=None, return_logits=True, return_feat=True): + if res_feat is not None: + res_feat = self.conv_res(res_feat) + x = x + res_feat + for conv in self.convs: + x = conv(x) + res_feat = x + outs = [] + if return_logits: + x = self.upsample(x) + if self.upsample_method == 'deconv': + x = self.relu(x) + mask_pred = self.conv_logits(x) + outs.append(mask_pred) + if return_feat: + outs.append(res_feat) + return outs if len(outs) > 1 else outs[0] diff --git a/mmdet/models/necks/__init__.py b/mmdet/models/necks/__init__.py new file mode 100644 index 0000000..0093021 --- /dev/null +++ b/mmdet/models/necks/__init__.py @@ -0,0 +1,3 @@ +from .fpn import FPN + +__all__ = ['FPN'] diff --git 
a/mmdet/models/necks/fpn.py b/mmdet/models/necks/fpn.py new file mode 100644 index 0000000..6b8c862 --- /dev/null +++ b/mmdet/models/necks/fpn.py @@ -0,0 +1,136 @@ +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import xavier_init + +from ..registry import NECKS +from ..utils import ConvModule + + +@NECKS.register_module +class FPN(nn.Module): + + def __init__(self, + in_channels, + out_channels, + num_outs, + start_level=0, + end_level=-1, + add_extra_convs=False, + extra_convs_on_inputs=True, + relu_before_extra_convs=False, + conv_cfg=None, + norm_cfg=None, + activation=None): + super(FPN, self).__init__() + assert isinstance(in_channels, list) + self.in_channels = in_channels + self.out_channels = out_channels + self.num_ins = len(in_channels) + self.num_outs = num_outs + self.activation = activation + self.relu_before_extra_convs = relu_before_extra_convs + + if end_level == -1: + self.backbone_end_level = self.num_ins + assert num_outs >= self.num_ins - start_level + else: + # if end_level < inputs, no extra level is allowed + self.backbone_end_level = end_level + assert end_level <= len(in_channels) + assert num_outs == end_level - start_level + self.start_level = start_level + self.end_level = end_level + self.add_extra_convs = add_extra_convs + self.extra_convs_on_inputs = extra_convs_on_inputs + + self.lateral_convs = nn.ModuleList() + self.fpn_convs = nn.ModuleList() + + for i in range(self.start_level, self.backbone_end_level): + l_conv = ConvModule( + in_channels[i], + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + activation=self.activation, + inplace=False) + fpn_conv = ConvModule( + out_channels, + out_channels, + 3, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + activation=self.activation, + inplace=False) + + self.lateral_convs.append(l_conv) + self.fpn_convs.append(fpn_conv) + + # add extra conv layers (e.g., RetinaNet) + extra_levels = num_outs - self.backbone_end_level + self.start_level + if add_extra_convs and extra_levels >= 1: + for i in range(extra_levels): + if i == 0 and self.extra_convs_on_inputs: + in_channels = self.in_channels[self.backbone_end_level - 1] + else: + in_channels = out_channels + extra_fpn_conv = ConvModule( + in_channels, + out_channels, + 3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + activation=self.activation, + inplace=False) + self.fpn_convs.append(extra_fpn_conv) + + # default init_weights for conv(msra) and norm in ConvModule + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + xavier_init(m, distribution='uniform') + + def forward(self, inputs): + assert len(inputs) == len(self.in_channels) + + # build laterals + laterals = [ + lateral_conv(inputs[i + self.start_level]) + for i, lateral_conv in enumerate(self.lateral_convs) + ] + + # build top-down path + used_backbone_levels = len(laterals) + for i in range(used_backbone_levels - 1, 0, -1): + laterals[i - 1] += F.interpolate( + laterals[i], scale_factor=2, mode='nearest') + + # build outputs + # part 1: from original levels + outs = [ + self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels) + ] + # part 2: add extra levels + if self.num_outs > len(outs): + # use max pool to get more levels on top of outputs + # (e.g., Faster R-CNN, Mask R-CNN) + if not self.add_extra_convs: + for i in range(self.num_outs - used_backbone_levels): + outs.append(F.max_pool2d(outs[-1], 1, stride=2)) + # add conv layers on top of original feature maps (RetinaNet) + else: + if 
self.extra_convs_on_inputs: + orig = inputs[self.backbone_end_level - 1] + outs.append(self.fpn_convs[used_backbone_levels](orig)) + else: + outs.append(self.fpn_convs[used_backbone_levels](outs[-1])) + for i in range(used_backbone_levels + 1, self.num_outs): + if self.relu_before_extra_convs: + outs.append(self.fpn_convs[i](F.relu(outs[-1]))) + else: + outs.append(self.fpn_convs[i](outs[-1])) + return tuple(outs) diff --git a/mmdet/models/registry.py b/mmdet/models/registry.py new file mode 100644 index 0000000..d5f8e67 --- /dev/null +++ b/mmdet/models/registry.py @@ -0,0 +1,44 @@ +import torch.nn as nn + + +class Registry(object): + + def __init__(self, name): + self._name = name + self._module_dict = dict() + + @property + def name(self): + return self._name + + @property + def module_dict(self): + return self._module_dict + + def _register_module(self, module_class): + """Register a module. + + Args: + module (:obj:`nn.Module`): Module to be registered. + """ + if not issubclass(module_class, nn.Module): + raise TypeError( + 'module must be a child of nn.Module, but got {}'.format( + module_class)) + module_name = module_class.__name__ + if module_name in self._module_dict: + raise KeyError('{} is already registered in {}'.format( + module_name, self.name)) + self._module_dict[module_name] = module_class + + def register_module(self, cls): + self._register_module(cls) + return cls + + +BACKBONES = Registry('backbone') +NECKS = Registry('neck') +ROI_EXTRACTORS = Registry('roi_extractor') +SHARED_HEADS = Registry('shared_head') +HEADS = Registry('head') +DETECTORS = Registry('detector') diff --git a/mmdet/models/roi_extractors/__init__.py b/mmdet/models/roi_extractors/__init__.py new file mode 100644 index 0000000..9161708 --- /dev/null +++ b/mmdet/models/roi_extractors/__init__.py @@ -0,0 +1,3 @@ +from .single_level import SingleRoIExtractor + +__all__ = ['SingleRoIExtractor'] diff --git a/mmdet/models/roi_extractors/single_level.py b/mmdet/models/roi_extractors/single_level.py new file mode 100644 index 0000000..32709d5 --- /dev/null +++ b/mmdet/models/roi_extractors/single_level.py @@ -0,0 +1,88 @@ +from __future__ import division + +import torch +import torch.nn as nn + +from mmdet import ops +from ..registry import ROI_EXTRACTORS + + +@ROI_EXTRACTORS.register_module +class SingleRoIExtractor(nn.Module): + """Extract RoI features from a single level feature map. + + If there are mulitple input feature levels, each RoI is mapped to a level + according to its scale. + + Args: + roi_layer (dict): Specify RoI layer type and arguments. + out_channels (int): Output channels of RoI layers. + featmap_strides (int): Strides of input feature maps. + finest_scale (int): Scale threshold of mapping to level 0. 
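+        Example:
+            An illustrative sketch only; the layer type and values below are
+            typical FPN settings, not taken from a specific config:
+
+            >>> roi_layer = dict(type='RoIAlign', out_size=7, sample_num=2)
+            >>> extractor = SingleRoIExtractor(
+            ...     roi_layer, out_channels=256, featmap_strides=[4, 8, 16, 32])
+            >>> # each RoI is then assigned to one of the 4 levels by
+            >>> # floor(log2(scale / finest_scale)), see map_roi_levels()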
+ """ + + def __init__(self, + roi_layer, + out_channels, + featmap_strides, + finest_scale=56): + super(SingleRoIExtractor, self).__init__() + self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides) + self.out_channels = out_channels + self.featmap_strides = featmap_strides + self.finest_scale = finest_scale + + @property + def num_inputs(self): + """int: Input feature map levels.""" + return len(self.featmap_strides) + + def init_weights(self): + pass + + def build_roi_layers(self, layer_cfg, featmap_strides): + cfg = layer_cfg.copy() + layer_type = cfg.pop('type') + assert hasattr(ops, layer_type) + layer_cls = getattr(ops, layer_type) + roi_layers = nn.ModuleList( + [layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides]) + return roi_layers + + def map_roi_levels(self, rois, num_levels): + """Map rois to corresponding feature levels by scales. + + - scale < finest_scale: level 0 + - finest_scale <= scale < finest_scale * 2: level 1 + - finest_scale * 2 <= scale < finest_scale * 4: level 2 + - scale >= finest_scale * 4: level 3 + + Args: + rois (Tensor): Input RoIs, shape (k, 5). + num_levels (int): Total level number. + + Returns: + Tensor: Level index (0-based) of each RoI, shape (k, ) + """ + scale = torch.sqrt( + (rois[:, 3] - rois[:, 1] + 1) * (rois[:, 4] - rois[:, 2] + 1)) + target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6)) + target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long() + return target_lvls + + def forward(self, feats, rois): + if len(feats) == 1: + return self.roi_layers[0](feats[0], rois) + + out_size = self.roi_layers[0].out_size + num_levels = len(feats) + target_lvls = self.map_roi_levels(rois, num_levels) + roi_feats = torch.cuda.FloatTensor(rois.size()[0], self.out_channels, + out_size, out_size).fill_(0) + for i in range(num_levels): + inds = target_lvls == i + if inds.any(): + rois_ = rois[inds, :] + roi_feats_t = self.roi_layers[i](feats[i], rois_) + roi_feats[inds] += roi_feats_t + return roi_feats diff --git a/mmdet/models/shared_heads/__init__.py b/mmdet/models/shared_heads/__init__.py new file mode 100644 index 0000000..bbe7014 --- /dev/null +++ b/mmdet/models/shared_heads/__init__.py @@ -0,0 +1,3 @@ +from .res_layer import ResLayer + +__all__ = ['ResLayer'] diff --git a/mmdet/models/shared_heads/res_layer.py b/mmdet/models/shared_heads/res_layer.py new file mode 100644 index 0000000..743c2ee --- /dev/null +++ b/mmdet/models/shared_heads/res_layer.py @@ -0,0 +1,69 @@ +import logging + +import torch.nn as nn +from mmcv.cnn import constant_init, kaiming_init +from mmcv.runner import load_checkpoint + +from ..backbones import ResNet, make_res_layer +from ..registry import SHARED_HEADS + + +@SHARED_HEADS.register_module +class ResLayer(nn.Module): + + def __init__(self, + depth, + stage=3, + stride=2, + dilation=1, + style='pytorch', + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + with_cp=False, + dcn=None): + super(ResLayer, self).__init__() + self.norm_eval = norm_eval + self.norm_cfg = norm_cfg + self.stage = stage + block, stage_blocks = ResNet.arch_settings[depth] + stage_block = stage_blocks[stage] + planes = 64 * 2**stage + inplanes = 64 * 2**(stage - 1) * block.expansion + + res_layer = make_res_layer( + block, + inplanes, + planes, + stage_block, + stride=stride, + dilation=dilation, + style=style, + with_cp=with_cp, + norm_cfg=self.norm_cfg, + dcn=dcn) + self.add_module('layer{}'.format(stage + 1), res_layer) + + def init_weights(self, pretrained=None): + if 
isinstance(pretrained, str): + logger = logging.getLogger() + load_checkpoint(self, pretrained, strict=False, logger=logger) + elif pretrained is None: + for m in self.modules(): + if isinstance(m, nn.Conv2d): + kaiming_init(m) + elif isinstance(m, nn.BatchNorm2d): + constant_init(m, 1) + else: + raise TypeError('pretrained must be a str or None') + + def forward(self, x): + res_layer = getattr(self, 'layer{}'.format(self.stage + 1)) + out = res_layer(x) + return out + + def train(self, mode=True): + super(ResLayer, self).train(mode) + if self.norm_eval: + for m in self.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() diff --git a/mmdet/models/utils/__init__.py b/mmdet/models/utils/__init__.py new file mode 100644 index 0000000..f9215c5 --- /dev/null +++ b/mmdet/models/utils/__init__.py @@ -0,0 +1,12 @@ +from .conv_ws import conv_ws_2d, ConvWS2d +from .conv_module import build_conv_layer, ConvModule +from .norm import build_norm_layer +from .scale import Scale +from .weight_init import (xavier_init, normal_init, uniform_init, kaiming_init, + bias_init_with_prob) + +__all__ = [ + 'conv_ws_2d', 'ConvWS2d', 'build_conv_layer', 'ConvModule', + 'build_norm_layer', 'xavier_init', 'normal_init', 'uniform_init', + 'kaiming_init', 'bias_init_with_prob', 'Scale' +] diff --git a/mmdet/models/utils/conv_module.py b/mmdet/models/utils/conv_module.py new file mode 100644 index 0000000..623d691 --- /dev/null +++ b/mmdet/models/utils/conv_module.py @@ -0,0 +1,163 @@ +import warnings + +import torch.nn as nn +from mmcv.cnn import kaiming_init, constant_init + +from .conv_ws import ConvWS2d +from .norm import build_norm_layer + +conv_cfg = { + 'Conv': nn.Conv2d, + 'ConvWS': ConvWS2d, + # TODO: octave conv +} + + +def build_conv_layer(cfg, *args, **kwargs): + """ Build convolution layer + + Args: + cfg (None or dict): cfg should contain: + type (str): identify conv layer type. + layer args: args needed to instantiate a conv layer. + + Returns: + layer (nn.Module): created conv layer + """ + if cfg is None: + cfg_ = dict(type='Conv') + else: + assert isinstance(cfg, dict) and 'type' in cfg + cfg_ = cfg.copy() + + layer_type = cfg_.pop('type') + if layer_type not in conv_cfg: + raise KeyError('Unrecognized norm type {}'.format(layer_type)) + else: + conv_layer = conv_cfg[layer_type] + + layer = conv_layer(*args, **kwargs, **cfg_) + + return layer + + +class ConvModule(nn.Module): + """Conv-Norm-Activation block. + + Args: + in_channels (int): Same as nn.Conv2d. + out_channels (int): Same as nn.Conv2d. + kernel_size (int or tuple[int]): Same as nn.Conv2d. + stride (int or tuple[int]): Same as nn.Conv2d. + padding (int or tuple[int]): Same as nn.Conv2d. + dilation (int or tuple[int]): Same as nn.Conv2d. + groups (int): Same as nn.Conv2d. + bias (bool or str): If specified as `auto`, it will be decided by the + norm_cfg. Bias will be set as True if norm_cfg is None, otherwise + False. + conv_cfg (dict): Config dict for convolution layer. + norm_cfg (dict): Config dict for normalization layer. + activation (str or None): Activation type, "ReLU" by default. + inplace (bool): Whether to use inplace mode for activation. + activate_last (bool): Whether to apply the activation layer in the + last. (Do not use this flag since the behavior and api may be + changed in the future.) 
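+        Example:
+            An illustrative sketch only (the values are not from a specific
+            config). With a norm layer configured, ``bias='auto'`` disables
+            the conv bias automatically:
+
+            >>> import torch
+            >>> m = ConvModule(256, 256, 3, padding=1, norm_cfg=dict(type='BN'))
+            >>> out = m(torch.rand(1, 256, 32, 32))  # conv -> bn -> relu
+            >>> tuple(out.shape)
+            (1, 256, 32, 32)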
+ """ + + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + bias='auto', + conv_cfg=None, + norm_cfg=None, + activation='relu', + inplace=True, + activate_last=True): + super(ConvModule, self).__init__() + assert conv_cfg is None or isinstance(conv_cfg, dict) + assert norm_cfg is None or isinstance(norm_cfg, dict) + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.activation = activation + self.inplace = inplace + self.activate_last = activate_last + + self.with_norm = norm_cfg is not None + self.with_activatation = activation is not None + # if the conv layer is before a norm layer, bias is unnecessary. + if bias == 'auto': + bias = False if self.with_norm else True + self.with_bias = bias + + if self.with_norm and self.with_bias: + warnings.warn('ConvModule has norm and bias at the same time') + + # build convolution layer + self.conv = build_conv_layer( + conv_cfg, + in_channels, + out_channels, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=bias) + # export the attributes of self.conv to a higher level for convenience + self.in_channels = self.conv.in_channels + self.out_channels = self.conv.out_channels + self.kernel_size = self.conv.kernel_size + self.stride = self.conv.stride + self.padding = self.conv.padding + self.dilation = self.conv.dilation + self.transposed = self.conv.transposed + self.output_padding = self.conv.output_padding + self.groups = self.conv.groups + + # build normalization layers + if self.with_norm: + norm_channels = out_channels if self.activate_last else in_channels + self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels) + self.add_module(self.norm_name, norm) + + # build activation layer + if self.with_activatation: + if self.activation not in ['relu']: + raise ValueError('{} is currently not supported.'.format( + self.activation)) + if self.activation == 'relu': + self.activate = nn.ReLU(inplace=inplace) + + # Use msra init by default + self.init_weights() + + @property + def norm(self): + return getattr(self, self.norm_name) + + def init_weights(self): + nonlinearity = 'relu' if self.activation is None else self.activation + kaiming_init(self.conv, nonlinearity=nonlinearity) + if self.with_norm: + constant_init(self.norm, 1, bias=0) + + def forward(self, x, activate=True, norm=True): + if self.activate_last: + x = self.conv(x) + if norm and self.with_norm: + x = self.norm(x) + if activate and self.with_activatation: + x = self.activate(x) + else: + # WARN: this may be removed or modified + if norm and self.with_norm: + x = self.norm(x) + if activate and self.with_activatation: + x = self.activate(x) + x = self.conv(x) + return x diff --git a/mmdet/models/utils/conv_ws.py b/mmdet/models/utils/conv_ws.py new file mode 100644 index 0000000..5ccd735 --- /dev/null +++ b/mmdet/models/utils/conv_ws.py @@ -0,0 +1,46 @@ +import torch.nn as nn +import torch.nn.functional as F + + +def conv_ws_2d(input, + weight, + bias=None, + stride=1, + padding=0, + dilation=1, + groups=1, + eps=1e-5): + c_in = weight.size(0) + weight_flat = weight.view(c_in, -1) + mean = weight_flat.mean(dim=1, keepdim=True).view(c_in, 1, 1, 1) + std = weight_flat.std(dim=1, keepdim=True).view(c_in, 1, 1, 1) + weight = (weight - mean) / (std + eps) + return F.conv2d(input, weight, bias, stride, padding, dilation, groups) + + +class ConvWS2d(nn.Conv2d): + + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + 
groups=1, + bias=True, + eps=1e-5): + super(ConvWS2d, self).__init__( + in_channels, + out_channels, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=bias) + self.eps = eps + + def forward(self, x): + return conv_ws_2d(x, self.weight, self.bias, self.stride, self.padding, + self.dilation, self.groups, self.eps) diff --git a/mmdet/models/utils/norm.py b/mmdet/models/utils/norm.py new file mode 100644 index 0000000..d5687cb --- /dev/null +++ b/mmdet/models/utils/norm.py @@ -0,0 +1,55 @@ +import torch.nn as nn + +norm_cfg = { + # format: layer_type: (abbreviation, module) + 'BN': ('bn', nn.BatchNorm2d), + 'SyncBN': ('bn', nn.SyncBatchNorm), + 'GN': ('gn', nn.GroupNorm), + # and potentially 'SN' +} + + +def build_norm_layer(cfg, num_features, postfix=''): + """ Build normalization layer + + Args: + cfg (dict): cfg should contain: + type (str): identify norm layer type. + layer args: args needed to instantiate a norm layer. + requires_grad (bool): [optional] whether stop gradient updates + num_features (int): number of channels from input. + postfix (int, str): appended into norm abbreviation to + create named layer. + + Returns: + name (str): abbreviation + postfix + layer (nn.Module): created norm layer + """ + assert isinstance(cfg, dict) and 'type' in cfg + cfg_ = cfg.copy() + + layer_type = cfg_.pop('type') + if layer_type not in norm_cfg: + raise KeyError('Unrecognized norm type {}'.format(layer_type)) + else: + abbr, norm_layer = norm_cfg[layer_type] + if norm_layer is None: + raise NotImplementedError + + assert isinstance(postfix, (int, str)) + name = abbr + str(postfix) + + requires_grad = cfg_.pop('requires_grad', True) + cfg_.setdefault('eps', 1e-5) + if layer_type != 'GN': + layer = norm_layer(num_features, **cfg_) + if layer_type == 'SyncBN': + layer._specify_ddp_gpu_num(1) + else: + assert 'num_groups' in cfg_ + layer = norm_layer(num_channels=num_features, **cfg_) + + for param in layer.parameters(): + param.requires_grad = requires_grad + + return name, layer diff --git a/mmdet/models/utils/scale.py b/mmdet/models/utils/scale.py new file mode 100644 index 0000000..68c37cd --- /dev/null +++ b/mmdet/models/utils/scale.py @@ -0,0 +1,12 @@ +import torch +import torch.nn as nn + + +class Scale(nn.Module): + + def __init__(self, scale=1.0): + super(Scale, self).__init__() + self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float)) + + def forward(self, x): + return x * self.scale diff --git a/mmdet/models/utils/weight_init.py b/mmdet/models/utils/weight_init.py new file mode 100644 index 0000000..17d4988 --- /dev/null +++ b/mmdet/models/utils/weight_init.py @@ -0,0 +1,46 @@ +import numpy as np +import torch.nn as nn + + +def xavier_init(module, gain=1, bias=0, distribution='normal'): + assert distribution in ['uniform', 'normal'] + if distribution == 'uniform': + nn.init.xavier_uniform_(module.weight, gain=gain) + else: + nn.init.xavier_normal_(module.weight, gain=gain) + if hasattr(module, 'bias'): + nn.init.constant_(module.bias, bias) + + +def normal_init(module, mean=0, std=1, bias=0): + nn.init.normal_(module.weight, mean, std) + if hasattr(module, 'bias'): + nn.init.constant_(module.bias, bias) + + +def uniform_init(module, a=0, b=1, bias=0): + nn.init.uniform_(module.weight, a, b) + if hasattr(module, 'bias'): + nn.init.constant_(module.bias, bias) + + +def kaiming_init(module, + mode='fan_out', + nonlinearity='relu', + bias=0, + distribution='normal'): + assert distribution in ['uniform', 'normal'] + if distribution == 
'uniform': + nn.init.kaiming_uniform_( + module.weight, mode=mode, nonlinearity=nonlinearity) + else: + nn.init.kaiming_normal_( + module.weight, mode=mode, nonlinearity=nonlinearity) + if hasattr(module, 'bias'): + nn.init.constant_(module.bias, bias) + + +def bias_init_with_prob(prior_prob): + """ initialize conv/fc bias value according to giving probablity""" + bias_init = float(-np.log((1 - prior_prob) / prior_prob)) + return bias_init diff --git a/mmdet/ops/__init__.py b/mmdet/ops/__init__.py new file mode 100644 index 0000000..b3cbc26 --- /dev/null +++ b/mmdet/ops/__init__.py @@ -0,0 +1,16 @@ +from .dcn import (DeformConv, DeformConvPack, ModulatedDeformConv, + ModulatedDeformConvPack, DeformRoIPooling, + DeformRoIPoolingPack, ModulatedDeformRoIPoolingPack, + deform_conv, modulated_deform_conv, deform_roi_pooling) +from .nms import nms, soft_nms +from .roi_align import RoIAlign, roi_align +from .roi_pool import RoIPool, roi_pool +from .sigmoid_focal_loss import SigmoidFocalLoss, sigmoid_focal_loss + +__all__ = [ + 'nms', 'soft_nms', 'RoIAlign', 'roi_align', 'RoIPool', 'roi_pool', + 'DeformConv', 'DeformConvPack', 'DeformRoIPooling', 'DeformRoIPoolingPack', + 'ModulatedDeformRoIPoolingPack', 'ModulatedDeformConv', + 'ModulatedDeformConvPack', 'deform_conv', 'modulated_deform_conv', + 'deform_roi_pooling', 'SigmoidFocalLoss', 'sigmoid_focal_loss' +] diff --git a/mmdet/ops/dcn/__init__.py b/mmdet/ops/dcn/__init__.py new file mode 100644 index 0000000..165e637 --- /dev/null +++ b/mmdet/ops/dcn/__init__.py @@ -0,0 +1,13 @@ +from .functions.deform_conv import deform_conv, modulated_deform_conv +from .functions.deform_pool import deform_roi_pooling +from .modules.deform_conv import (DeformConv, ModulatedDeformConv, + DeformConvPack, ModulatedDeformConvPack) +from .modules.deform_pool import (DeformRoIPooling, DeformRoIPoolingPack, + ModulatedDeformRoIPoolingPack) + +__all__ = [ + 'DeformConv', 'DeformConvPack', 'ModulatedDeformConv', + 'ModulatedDeformConvPack', 'DeformRoIPooling', 'DeformRoIPoolingPack', + 'ModulatedDeformRoIPoolingPack', 'deform_conv', 'modulated_deform_conv', + 'deform_roi_pooling' +] diff --git a/mmdet/ops/dcn/functions/__init__.py b/mmdet/ops/dcn/functions/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/mmdet/ops/dcn/functions/deform_conv.py b/mmdet/ops/dcn/functions/deform_conv.py new file mode 100644 index 0000000..6af75a7 --- /dev/null +++ b/mmdet/ops/dcn/functions/deform_conv.py @@ -0,0 +1,181 @@ +import torch +from torch.autograd import Function +from torch.nn.modules.utils import _pair + +from .. 
import deform_conv_cuda + + +class DeformConvFunction(Function): + + @staticmethod + def forward(ctx, + input, + offset, + weight, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1, + im2col_step=64): + if input is not None and input.dim() != 4: + raise ValueError( + "Expected 4D tensor as input, got {}D tensor instead.".format( + input.dim())) + ctx.stride = _pair(stride) + ctx.padding = _pair(padding) + ctx.dilation = _pair(dilation) + ctx.groups = groups + ctx.deformable_groups = deformable_groups + ctx.im2col_step = im2col_step + + ctx.save_for_backward(input, offset, weight) + + output = input.new_empty( + DeformConvFunction._output_size(input, weight, ctx.padding, + ctx.dilation, ctx.stride)) + + ctx.bufs_ = [input.new_empty(0), input.new_empty(0)] # columns, ones + + if not input.is_cuda: + raise NotImplementedError + else: + cur_im2col_step = min(ctx.im2col_step, input.shape[0]) + assert (input.shape[0] % + cur_im2col_step) == 0, 'im2col step must divide batchsize' + deform_conv_cuda.deform_conv_forward_cuda( + input, weight, offset, output, ctx.bufs_[0], ctx.bufs_[1], + weight.size(3), weight.size(2), ctx.stride[1], ctx.stride[0], + ctx.padding[1], ctx.padding[0], ctx.dilation[1], + ctx.dilation[0], ctx.groups, ctx.deformable_groups, + cur_im2col_step) + return output + + @staticmethod + def backward(ctx, grad_output): + input, offset, weight = ctx.saved_tensors + + grad_input = grad_offset = grad_weight = None + + if not grad_output.is_cuda: + raise NotImplementedError + else: + cur_im2col_step = min(ctx.im2col_step, input.shape[0]) + assert (input.shape[0] % + cur_im2col_step) == 0, 'im2col step must divide batchsize' + + if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]: + grad_input = torch.zeros_like(input) + grad_offset = torch.zeros_like(offset) + deform_conv_cuda.deform_conv_backward_input_cuda( + input, offset, grad_output, grad_input, + grad_offset, weight, ctx.bufs_[0], weight.size(3), + weight.size(2), ctx.stride[1], ctx.stride[0], + ctx.padding[1], ctx.padding[0], ctx.dilation[1], + ctx.dilation[0], ctx.groups, ctx.deformable_groups, + cur_im2col_step) + + if ctx.needs_input_grad[2]: + grad_weight = torch.zeros_like(weight) + deform_conv_cuda.deform_conv_backward_parameters_cuda( + input, offset, grad_output, + grad_weight, ctx.bufs_[0], ctx.bufs_[1], weight.size(3), + weight.size(2), ctx.stride[1], ctx.stride[0], + ctx.padding[1], ctx.padding[0], ctx.dilation[1], + ctx.dilation[0], ctx.groups, ctx.deformable_groups, 1, + cur_im2col_step) + + return (grad_input, grad_offset, grad_weight, None, None, None, None, + None) + + @staticmethod + def _output_size(input, weight, padding, dilation, stride): + channels = weight.size(0) + output_size = (input.size(0), channels) + for d in range(input.dim() - 2): + in_size = input.size(d + 2) + pad = padding[d] + kernel = dilation[d] * (weight.size(d + 2) - 1) + 1 + stride_ = stride[d] + output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1, ) + if not all(map(lambda s: s > 0, output_size)): + raise ValueError( + "convolution input is too small (output would be {})".format( + 'x'.join(map(str, output_size)))) + return output_size + + +class ModulatedDeformConvFunction(Function): + + @staticmethod + def forward(ctx, + input, + offset, + mask, + weight, + bias=None, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1): + ctx.stride = stride + ctx.padding = padding + ctx.dilation = dilation + ctx.groups = groups + ctx.deformable_groups = deformable_groups + ctx.with_bias = bias 
is not None + if not ctx.with_bias: + bias = input.new_empty(1) # fake tensor + if not input.is_cuda: + raise NotImplementedError + if weight.requires_grad or mask.requires_grad or offset.requires_grad \ + or input.requires_grad: + ctx.save_for_backward(input, offset, mask, weight, bias) + output = input.new_empty( + ModulatedDeformConvFunction._infer_shape(ctx, input, weight)) + ctx._bufs = [input.new_empty(0), input.new_empty(0)] + deform_conv_cuda.modulated_deform_conv_cuda_forward( + input, weight, bias, ctx._bufs[0], offset, mask, output, + ctx._bufs[1], weight.shape[2], weight.shape[3], ctx.stride, + ctx.stride, ctx.padding, ctx.padding, ctx.dilation, ctx.dilation, + ctx.groups, ctx.deformable_groups, ctx.with_bias) + return output + + @staticmethod + def backward(ctx, grad_output): + if not grad_output.is_cuda: + raise NotImplementedError + input, offset, mask, weight, bias = ctx.saved_tensors + grad_input = torch.zeros_like(input) + grad_offset = torch.zeros_like(offset) + grad_mask = torch.zeros_like(mask) + grad_weight = torch.zeros_like(weight) + grad_bias = torch.zeros_like(bias) + deform_conv_cuda.modulated_deform_conv_cuda_backward( + input, weight, bias, ctx._bufs[0], offset, mask, ctx._bufs[1], + grad_input, grad_weight, grad_bias, grad_offset, grad_mask, + grad_output, weight.shape[2], weight.shape[3], ctx.stride, + ctx.stride, ctx.padding, ctx.padding, ctx.dilation, ctx.dilation, + ctx.groups, ctx.deformable_groups, ctx.with_bias) + if not ctx.with_bias: + grad_bias = None + + return (grad_input, grad_offset, grad_mask, grad_weight, grad_bias, + None, None, None, None, None) + + @staticmethod + def _infer_shape(ctx, input, weight): + n = input.size(0) + channels_out = weight.size(0) + height, width = input.shape[2:4] + kernel_h, kernel_w = weight.shape[2:4] + height_out = (height + 2 * ctx.padding - + (ctx.dilation * (kernel_h - 1) + 1)) // ctx.stride + 1 + width_out = (width + 2 * ctx.padding - + (ctx.dilation * (kernel_w - 1) + 1)) // ctx.stride + 1 + return n, channels_out, height_out, width_out + + +deform_conv = DeformConvFunction.apply +modulated_deform_conv = ModulatedDeformConvFunction.apply diff --git a/mmdet/ops/dcn/functions/deform_pool.py b/mmdet/ops/dcn/functions/deform_pool.py new file mode 100644 index 0000000..65ff0ef --- /dev/null +++ b/mmdet/ops/dcn/functions/deform_pool.py @@ -0,0 +1,69 @@ +import torch +from torch.autograd import Function + +from .. 
import deform_pool_cuda + + +class DeformRoIPoolingFunction(Function): + + @staticmethod + def forward(ctx, + data, + rois, + offset, + spatial_scale, + out_size, + out_channels, + no_trans, + group_size=1, + part_size=None, + sample_per_part=4, + trans_std=.0): + ctx.spatial_scale = spatial_scale + ctx.out_size = out_size + ctx.out_channels = out_channels + ctx.no_trans = no_trans + ctx.group_size = group_size + ctx.part_size = out_size if part_size is None else part_size + ctx.sample_per_part = sample_per_part + ctx.trans_std = trans_std + + assert 0.0 <= ctx.trans_std <= 1.0 + if not data.is_cuda: + raise NotImplementedError + + n = rois.shape[0] + output = data.new_empty(n, out_channels, out_size, out_size) + output_count = data.new_empty(n, out_channels, out_size, out_size) + deform_pool_cuda.deform_psroi_pooling_cuda_forward( + data, rois, offset, output, output_count, ctx.no_trans, + ctx.spatial_scale, ctx.out_channels, ctx.group_size, ctx.out_size, + ctx.part_size, ctx.sample_per_part, ctx.trans_std) + + if data.requires_grad or rois.requires_grad or offset.requires_grad: + ctx.save_for_backward(data, rois, offset) + ctx.output_count = output_count + + return output + + @staticmethod + def backward(ctx, grad_output): + if not grad_output.is_cuda: + raise NotImplementedError + + data, rois, offset = ctx.saved_tensors + output_count = ctx.output_count + grad_input = torch.zeros_like(data) + grad_rois = None + grad_offset = torch.zeros_like(offset) + + deform_pool_cuda.deform_psroi_pooling_cuda_backward( + grad_output, data, rois, offset, output_count, grad_input, + grad_offset, ctx.no_trans, ctx.spatial_scale, ctx.out_channels, + ctx.group_size, ctx.out_size, ctx.part_size, ctx.sample_per_part, + ctx.trans_std) + return (grad_input, grad_rois, grad_offset, None, None, None, None, + None, None, None, None) + + +deform_roi_pooling = DeformRoIPoolingFunction.apply diff --git a/mmdet/ops/dcn/modules/__init__.py b/mmdet/ops/dcn/modules/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/mmdet/ops/dcn/modules/deform_conv.py b/mmdet/ops/dcn/modules/deform_conv.py new file mode 100644 index 0000000..50d15d1 --- /dev/null +++ b/mmdet/ops/dcn/modules/deform_conv.py @@ -0,0 +1,157 @@ +import math + +import torch +import torch.nn as nn +from torch.nn.modules.utils import _pair + +from ..functions.deform_conv import deform_conv, modulated_deform_conv + + +class DeformConv(nn.Module): + + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1, + bias=False): + super(DeformConv, self).__init__() + + assert not bias + assert in_channels % groups == 0, \ + 'in_channels {} cannot be divisible by groups {}'.format( + in_channels, groups) + assert out_channels % groups == 0, \ + 'out_channels {} cannot be divisible by groups {}'.format( + out_channels, groups) + + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = _pair(kernel_size) + self.stride = _pair(stride) + self.padding = _pair(padding) + self.dilation = _pair(dilation) + self.groups = groups + self.deformable_groups = deformable_groups + + self.weight = nn.Parameter( + torch.Tensor(out_channels, in_channels // self.groups, + *self.kernel_size)) + + self.reset_parameters() + + def reset_parameters(self): + n = self.in_channels + for k in self.kernel_size: + n *= k + stdv = 1. 
/ math.sqrt(n) + self.weight.data.uniform_(-stdv, stdv) + + def forward(self, x, offset): + return deform_conv(x, offset, self.weight, self.stride, self.padding, + self.dilation, self.groups, self.deformable_groups) + + +class DeformConvPack(DeformConv): + + def __init__(self, *args, **kwargs): + super(DeformConvPack, self).__init__(*args, **kwargs) + + self.conv_offset = nn.Conv2d( + self.in_channels, + self.deformable_groups * 2 * self.kernel_size[0] * + self.kernel_size[1], + kernel_size=self.kernel_size, + stride=_pair(self.stride), + padding=_pair(self.padding), + bias=True) + self.init_offset() + + def init_offset(self): + self.conv_offset.weight.data.zero_() + self.conv_offset.bias.data.zero_() + + def forward(self, x): + offset = self.conv_offset(x) + return deform_conv(x, offset, self.weight, self.stride, self.padding, + self.dilation, self.groups, self.deformable_groups) + + +class ModulatedDeformConv(nn.Module): + + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1, + bias=True): + super(ModulatedDeformConv, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = _pair(kernel_size) + self.stride = stride + self.padding = padding + self.dilation = dilation + self.groups = groups + self.deformable_groups = deformable_groups + self.with_bias = bias + + self.weight = nn.Parameter( + torch.Tensor(out_channels, in_channels // groups, + *self.kernel_size)) + if bias: + self.bias = nn.Parameter(torch.Tensor(out_channels)) + else: + self.register_parameter('bias', None) + self.reset_parameters() + + def reset_parameters(self): + n = self.in_channels + for k in self.kernel_size: + n *= k + stdv = 1. / math.sqrt(n) + self.weight.data.uniform_(-stdv, stdv) + if self.bias is not None: + self.bias.data.zero_() + + def forward(self, x, offset, mask): + return modulated_deform_conv(x, offset, mask, self.weight, self.bias, + self.stride, self.padding, self.dilation, + self.groups, self.deformable_groups) + + +class ModulatedDeformConvPack(ModulatedDeformConv): + + def __init__(self, *args, **kwargs): + super(ModulatedDeformConvPack, self).__init__(*args, **kwargs) + + self.conv_offset_mask = nn.Conv2d( + self.in_channels, + self.deformable_groups * 3 * self.kernel_size[0] * + self.kernel_size[1], + kernel_size=self.kernel_size, + stride=_pair(self.stride), + padding=_pair(self.padding), + bias=True) + self.init_offset() + + def init_offset(self): + self.conv_offset_mask.weight.data.zero_() + self.conv_offset_mask.bias.data.zero_() + + def forward(self, x): + out = self.conv_offset_mask(x) + o1, o2, mask = torch.chunk(out, 3, dim=1) + offset = torch.cat((o1, o2), dim=1) + mask = torch.sigmoid(mask) + return modulated_deform_conv(x, offset, mask, self.weight, self.bias, + self.stride, self.padding, self.dilation, + self.groups, self.deformable_groups) diff --git a/mmdet/ops/dcn/modules/deform_pool.py b/mmdet/ops/dcn/modules/deform_pool.py new file mode 100644 index 0000000..5e01967 --- /dev/null +++ b/mmdet/ops/dcn/modules/deform_pool.py @@ -0,0 +1,172 @@ +from torch import nn + +from ..functions.deform_pool import deform_roi_pooling + + +class DeformRoIPooling(nn.Module): + + def __init__(self, + spatial_scale, + out_size, + out_channels, + no_trans, + group_size=1, + part_size=None, + sample_per_part=4, + trans_std=.0): + super(DeformRoIPooling, self).__init__() + self.spatial_scale = spatial_scale + self.out_size = out_size + self.out_channels = out_channels 
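+        # Following the Deformable ConvNets formulation (assumed, not spelled
+        # out here): `no_trans` switches the learned bin offsets off,
+        # `part_size` is the resolution of the offset map (defaults to
+        # `out_size`), `sample_per_part` is the number of sampling points per
+        # output bin, and `trans_std` scales the predicted offsets.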
+ self.no_trans = no_trans + self.group_size = group_size + self.part_size = out_size if part_size is None else part_size + self.sample_per_part = sample_per_part + self.trans_std = trans_std + + def forward(self, data, rois, offset): + if self.no_trans: + offset = data.new_empty(0) + return deform_roi_pooling( + data, rois, offset, self.spatial_scale, self.out_size, + self.out_channels, self.no_trans, self.group_size, self.part_size, + self.sample_per_part, self.trans_std) + + +class DeformRoIPoolingPack(DeformRoIPooling): + + def __init__(self, + spatial_scale, + out_size, + out_channels, + no_trans, + group_size=1, + part_size=None, + sample_per_part=4, + trans_std=.0, + num_offset_fcs=3, + deform_fc_channels=1024): + super(DeformRoIPoolingPack, + self).__init__(spatial_scale, out_size, out_channels, no_trans, + group_size, part_size, sample_per_part, trans_std) + + self.num_offset_fcs = num_offset_fcs + self.deform_fc_channels = deform_fc_channels + + if not no_trans: + seq = [] + ic = self.out_size * self.out_size * self.out_channels + for i in range(self.num_offset_fcs): + if i < self.num_offset_fcs - 1: + oc = self.deform_fc_channels + else: + oc = self.out_size * self.out_size * 2 + seq.append(nn.Linear(ic, oc)) + ic = oc + if i < self.num_offset_fcs - 1: + seq.append(nn.ReLU(inplace=True)) + self.offset_fc = nn.Sequential(*seq) + self.offset_fc[-1].weight.data.zero_() + self.offset_fc[-1].bias.data.zero_() + + def forward(self, data, rois): + assert data.size(1) == self.out_channels + if self.no_trans: + offset = data.new_empty(0) + return deform_roi_pooling( + data, rois, offset, self.spatial_scale, self.out_size, + self.out_channels, self.no_trans, self.group_size, + self.part_size, self.sample_per_part, self.trans_std) + else: + n = rois.shape[0] + offset = data.new_empty(0) + x = deform_roi_pooling(data, rois, offset, self.spatial_scale, + self.out_size, self.out_channels, True, + self.group_size, self.part_size, + self.sample_per_part, self.trans_std) + offset = self.offset_fc(x.view(n, -1)) + offset = offset.view(n, 2, self.out_size, self.out_size) + return deform_roi_pooling( + data, rois, offset, self.spatial_scale, self.out_size, + self.out_channels, self.no_trans, self.group_size, + self.part_size, self.sample_per_part, self.trans_std) + + +class ModulatedDeformRoIPoolingPack(DeformRoIPooling): + + def __init__(self, + spatial_scale, + out_size, + out_channels, + no_trans, + group_size=1, + part_size=None, + sample_per_part=4, + trans_std=.0, + num_offset_fcs=3, + num_mask_fcs=2, + deform_fc_channels=1024): + super(ModulatedDeformRoIPoolingPack, self).__init__( + spatial_scale, out_size, out_channels, no_trans, group_size, + part_size, sample_per_part, trans_std) + + self.num_offset_fcs = num_offset_fcs + self.num_mask_fcs = num_mask_fcs + self.deform_fc_channels = deform_fc_channels + + if not no_trans: + offset_fc_seq = [] + ic = self.out_size * self.out_size * self.out_channels + for i in range(self.num_offset_fcs): + if i < self.num_offset_fcs - 1: + oc = self.deform_fc_channels + else: + oc = self.out_size * self.out_size * 2 + offset_fc_seq.append(nn.Linear(ic, oc)) + ic = oc + if i < self.num_offset_fcs - 1: + offset_fc_seq.append(nn.ReLU(inplace=True)) + self.offset_fc = nn.Sequential(*offset_fc_seq) + self.offset_fc[-1].weight.data.zero_() + self.offset_fc[-1].bias.data.zero_() + + mask_fc_seq = [] + ic = self.out_size * self.out_size * self.out_channels + for i in range(self.num_mask_fcs): + if i < self.num_mask_fcs - 1: + oc = self.deform_fc_channels + else: + 
oc = self.out_size * self.out_size + mask_fc_seq.append(nn.Linear(ic, oc)) + ic = oc + if i < self.num_mask_fcs - 1: + mask_fc_seq.append(nn.ReLU(inplace=True)) + else: + mask_fc_seq.append(nn.Sigmoid()) + self.mask_fc = nn.Sequential(*mask_fc_seq) + self.mask_fc[-2].weight.data.zero_() + self.mask_fc[-2].bias.data.zero_() + + def forward(self, data, rois): + assert data.size(1) == self.out_channels + if self.no_trans: + offset = data.new_empty(0) + return deform_roi_pooling( + data, rois, offset, self.spatial_scale, self.out_size, + self.out_channels, self.no_trans, self.group_size, + self.part_size, self.sample_per_part, self.trans_std) + else: + n = rois.shape[0] + offset = data.new_empty(0) + x = deform_roi_pooling(data, rois, offset, self.spatial_scale, + self.out_size, self.out_channels, True, + self.group_size, self.part_size, + self.sample_per_part, self.trans_std) + offset = self.offset_fc(x.view(n, -1)) + offset = offset.view(n, 2, self.out_size, self.out_size) + mask = self.mask_fc(x.view(n, -1)) + mask = mask.view(n, 1, self.out_size, self.out_size) + return deform_roi_pooling( + data, rois, offset, self.spatial_scale, self.out_size, + self.out_channels, self.no_trans, self.group_size, + self.part_size, self.sample_per_part, self.trans_std) * mask diff --git a/mmdet/ops/dcn/setup.py b/mmdet/ops/dcn/setup.py new file mode 100644 index 0000000..9638018 --- /dev/null +++ b/mmdet/ops/dcn/setup.py @@ -0,0 +1,15 @@ +from setuptools import setup +from torch.utils.cpp_extension import BuildExtension, CUDAExtension + +setup( + name='deform_conv', + ext_modules=[ + CUDAExtension('deform_conv_cuda', [ + 'src/deform_conv_cuda.cpp', + 'src/deform_conv_cuda_kernel.cu', + ]), + CUDAExtension( + 'deform_pool_cuda', + ['src/deform_pool_cuda.cpp', 'src/deform_pool_cuda_kernel.cu']), + ], + cmdclass={'build_ext': BuildExtension}) diff --git a/mmdet/ops/dcn/src/deform_conv_cuda.cpp b/mmdet/ops/dcn/src/deform_conv_cuda.cpp new file mode 100644 index 0000000..c4563ed --- /dev/null +++ b/mmdet/ops/dcn/src/deform_conv_cuda.cpp @@ -0,0 +1,695 @@ +// modify from +// https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda.c + +#include + +#include +#include + +void deformable_im2col(const at::Tensor data_im, const at::Tensor data_offset, + const int channels, const int height, const int width, + const int ksize_h, const int ksize_w, const int pad_h, + const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int parallel_imgs, const int deformable_group, + at::Tensor data_col); + +void deformable_col2im(const at::Tensor data_col, const at::Tensor data_offset, + const int channels, const int height, const int width, + const int ksize_h, const int ksize_w, const int pad_h, + const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int parallel_imgs, const int deformable_group, + at::Tensor grad_im); + +void deformable_col2im_coord( + const at::Tensor data_col, const at::Tensor data_im, + const at::Tensor data_offset, const int channels, const int height, + const int width, const int ksize_h, const int ksize_w, const int pad_h, + const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, const int parallel_imgs, + const int deformable_group, at::Tensor grad_offset); + +void modulated_deformable_im2col_cuda( + const at::Tensor data_im, const at::Tensor data_offset, + const at::Tensor data_mask, const 
int batch_size, const int channels, + const int height_im, const int width_im, const int height_col, + const int width_col, const int kernel_h, const int kenerl_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, const int deformable_group, + at::Tensor data_col); + +void modulated_deformable_col2im_cuda( + const at::Tensor data_col, const at::Tensor data_offset, + const at::Tensor data_mask, const int batch_size, const int channels, + const int height_im, const int width_im, const int height_col, + const int width_col, const int kernel_h, const int kenerl_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, const int deformable_group, + at::Tensor grad_im); + +void modulated_deformable_col2im_coord_cuda( + const at::Tensor data_col, const at::Tensor data_im, + const at::Tensor data_offset, const at::Tensor data_mask, + const int batch_size, const int channels, const int height_im, + const int width_im, const int height_col, const int width_col, + const int kernel_h, const int kenerl_w, const int pad_h, const int pad_w, + const int stride_h, const int stride_w, const int dilation_h, + const int dilation_w, const int deformable_group, at::Tensor grad_offset, + at::Tensor grad_mask); + +void shape_check(at::Tensor input, at::Tensor offset, at::Tensor *gradOutput, + at::Tensor weight, int kH, int kW, int dH, int dW, int padH, + int padW, int dilationH, int dilationW, int group, + int deformable_group) { + AT_CHECK(weight.ndimension() == 4, + "4D weight tensor (nOutputPlane,nInputPlane,kH,kW) expected, " + "but got: %s", + weight.ndimension()); + + AT_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); + + AT_CHECK(kW > 0 && kH > 0, + "kernel size should be greater than zero, but got kH: %d kW: %d", kH, + kW); + + AT_CHECK((weight.size(2) == kH && weight.size(3) == kW), + "kernel size should be consistent with weight, ", + "but got kH: %d kW: %d weight.size(2): %d, weight.size(3): %d", kH, + kW, weight.size(2), weight.size(3)); + + AT_CHECK(dW > 0 && dH > 0, + "stride should be greater than zero, but got dH: %d dW: %d", dH, dW); + + AT_CHECK( + dilationW > 0 && dilationH > 0, + "dilation should be greater than 0, but got dilationH: %d dilationW: %d", + dilationH, dilationW); + + int ndim = input.ndimension(); + int dimf = 0; + int dimh = 1; + int dimw = 2; + + if (ndim == 4) { + dimf++; + dimh++; + dimw++; + } + + AT_CHECK(ndim == 3 || ndim == 4, "3D or 4D input tensor expected but got: %s", + ndim); + + long nInputPlane = weight.size(1) * group; + long inputHeight = input.size(dimh); + long inputWidth = input.size(dimw); + long nOutputPlane = weight.size(0); + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + + AT_CHECK(nInputPlane % deformable_group == 0, + "input channels must divide deformable group size"); + + if (outputWidth < 1 || outputHeight < 1) + AT_ERROR( + "Given input size: (%ld x %ld x %ld). " + "Calculated output size: (%ld x %ld x %ld). 
Output size is too small", + nInputPlane, inputHeight, inputWidth, nOutputPlane, outputHeight, + outputWidth); + + AT_CHECK(input.size(1) == nInputPlane, + "invalid number of input planes, expected: %d, but got: %d", + nInputPlane, input.size(1)); + + AT_CHECK((inputHeight >= kH && inputWidth >= kW), + "input image is smaller than kernel"); + + AT_CHECK((offset.size(2) == outputHeight && offset.size(3) == outputWidth), + "invalid spatial size of offset, expected height: %d width: %d, but " + "got height: %d width: %d", + outputHeight, outputWidth, offset.size(2), offset.size(3)); + + AT_CHECK((offset.size(1) == deformable_group * 2 * kH * kW), + "invalid number of channels of offset"); + + if (gradOutput != NULL) { + AT_CHECK(gradOutput->size(dimf) == nOutputPlane, + "invalid number of gradOutput planes, expected: %d, but got: %d", + nOutputPlane, gradOutput->size(dimf)); + + AT_CHECK((gradOutput->size(dimh) == outputHeight && + gradOutput->size(dimw) == outputWidth), + "invalid size of gradOutput, expected height: %d width: %d , but " + "got height: %d width: %d", + outputHeight, outputWidth, gradOutput->size(dimh), + gradOutput->size(dimw)); + } +} + +int deform_conv_forward_cuda(at::Tensor input, at::Tensor weight, + at::Tensor offset, at::Tensor output, + at::Tensor columns, at::Tensor ones, int kW, + int kH, int dW, int dH, int padW, int padH, + int dilationW, int dilationH, int group, + int deformable_group, int im2col_step) { + // todo: resize columns to include im2col: done + // todo: add im2col_step as input + // todo: add new output buffer and transpose it to output (or directly + // transpose output) todo: possibly change data indexing because of + // parallel_imgs + + shape_check(input, offset, NULL, weight, kH, kW, dH, dW, padH, padW, + dilationH, dilationW, group, deformable_group); + + input = input.contiguous(); + offset = offset.contiguous(); + weight = weight.contiguous(); + + int batch = 1; + if (input.ndimension() == 3) { + // Force batch + batch = 0; + input.unsqueeze_(0); + offset.unsqueeze_(0); + } + + // todo: assert batchsize dividable by im2col_step + + long batchSize = input.size(0); + long nInputPlane = input.size(1); + long inputHeight = input.size(2); + long inputWidth = input.size(3); + + long nOutputPlane = weight.size(0); + + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + + AT_CHECK((offset.size(0) == batchSize), "invalid batch size of offset"); + + output = output.view({batchSize / im2col_step, im2col_step, nOutputPlane, + outputHeight, outputWidth}); + columns = at::zeros( + {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, + input.options()); + + if (ones.ndimension() != 2 || + ones.size(0) * ones.size(1) < outputHeight * outputWidth) { + ones = at::ones({outputHeight, outputWidth}, input.options()); + } + + input = input.view({batchSize / im2col_step, im2col_step, nInputPlane, + inputHeight, inputWidth}); + offset = + offset.view({batchSize / im2col_step, im2col_step, + deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + at::Tensor output_buffer = + at::zeros({batchSize / im2col_step, nOutputPlane, + im2col_step * outputHeight, outputWidth}, + output.options()); + + output_buffer = output_buffer.view( + {output_buffer.size(0), group, output_buffer.size(1) / group, + output_buffer.size(2), output_buffer.size(3)}); + + for (int elt = 0; elt < batchSize / im2col_step; elt++) { + 
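+    // For each mini-batch chunk of `im2col_step` images: materialize the
+    // offset-sampled input patches into `columns` (deformable im2col), then
+    // apply the convolution as a grouped matrix multiply (addmm_) accumulated
+    // into `output_buffer`.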
deformable_im2col(input[elt], offset[elt], nInputPlane, inputHeight, + inputWidth, kH, kW, padH, padW, dH, dW, dilationH, + dilationW, im2col_step, deformable_group, columns); + + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + weight = weight.view({group, weight.size(0) / group, weight.size(1), + weight.size(2), weight.size(3)}); + + for (int g = 0; g < group; g++) { + output_buffer[elt][g] = output_buffer[elt][g] + .flatten(1) + .addmm_(weight[g].flatten(1), columns[g]) + .view_as(output_buffer[elt][g]); + } + } + + output_buffer = output_buffer.view( + {output_buffer.size(0), output_buffer.size(1) * output_buffer.size(2), + output_buffer.size(3), output_buffer.size(4)}); + + output_buffer = output_buffer.view({batchSize / im2col_step, nOutputPlane, + im2col_step, outputHeight, outputWidth}); + output_buffer.transpose_(1, 2); + output.copy_(output_buffer); + output = output.view({batchSize, nOutputPlane, outputHeight, outputWidth}); + + input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); + offset = offset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + if (batch == 0) { + output = output.view({nOutputPlane, outputHeight, outputWidth}); + input = input.view({nInputPlane, inputHeight, inputWidth}); + offset = offset.view({offset.size(1), offset.size(2), offset.size(3)}); + } + + return 1; +} + +int deform_conv_backward_input_cuda(at::Tensor input, at::Tensor offset, + at::Tensor gradOutput, at::Tensor gradInput, + at::Tensor gradOffset, at::Tensor weight, + at::Tensor columns, int kW, int kH, int dW, + int dH, int padW, int padH, int dilationW, + int dilationH, int group, + int deformable_group, int im2col_step) { + shape_check(input, offset, &gradOutput, weight, kH, kW, dH, dW, padH, padW, + dilationH, dilationW, group, deformable_group); + + input = input.contiguous(); + offset = offset.contiguous(); + gradOutput = gradOutput.contiguous(); + weight = weight.contiguous(); + + int batch = 1; + + if (input.ndimension() == 3) { + // Force batch + batch = 0; + input = input.view({1, input.size(0), input.size(1), input.size(2)}); + offset = offset.view({1, offset.size(0), offset.size(1), offset.size(2)}); + gradOutput = gradOutput.view( + {1, gradOutput.size(0), gradOutput.size(1), gradOutput.size(2)}); + } + + long batchSize = input.size(0); + long nInputPlane = input.size(1); + long inputHeight = input.size(2); + long inputWidth = input.size(3); + + long nOutputPlane = weight.size(0); + + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + + AT_CHECK((offset.size(0) == batchSize), 3, "invalid batch size of offset"); + gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth}); + columns = at::zeros( + {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, + input.options()); + + // change order of grad output + gradOutput = gradOutput.view({batchSize / im2col_step, im2col_step, + nOutputPlane, outputHeight, outputWidth}); + gradOutput.transpose_(1, 2); + + gradInput = gradInput.view({batchSize / im2col_step, im2col_step, nInputPlane, + inputHeight, inputWidth}); + input = input.view({batchSize / im2col_step, im2col_step, nInputPlane, + inputHeight, inputWidth}); + gradOffset = gradOffset.view({batchSize / im2col_step, im2col_step, + deformable_group * 2 * kH * kW, outputHeight, + outputWidth}); + offset = + offset.view({batchSize / im2col_step, im2col_step, 
+ deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + for (int elt = 0; elt < batchSize / im2col_step; elt++) { + // divide into groups + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + weight = weight.view({group, weight.size(0) / group, weight.size(1), + weight.size(2), weight.size(3)}); + gradOutput = gradOutput.view( + {gradOutput.size(0), group, gradOutput.size(1) / group, + gradOutput.size(2), gradOutput.size(3), gradOutput.size(4)}); + + for (int g = 0; g < group; g++) { + columns[g] = columns[g].addmm_(weight[g].flatten(1).transpose(0, 1), + gradOutput[elt][g].flatten(1), 0.0f, 1.0f); + } + + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + gradOutput = gradOutput.view( + {gradOutput.size(0), gradOutput.size(1) * gradOutput.size(2), + gradOutput.size(3), gradOutput.size(4), gradOutput.size(5)}); + + deformable_col2im_coord(columns, input[elt], offset[elt], nInputPlane, + inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, + dilationH, dilationW, im2col_step, deformable_group, + gradOffset[elt]); + + deformable_col2im(columns, offset[elt], nInputPlane, inputHeight, + inputWidth, kH, kW, padH, padW, dH, dW, dilationH, + dilationW, im2col_step, deformable_group, gradInput[elt]); + } + + gradOutput.transpose_(1, 2); + gradOutput = + gradOutput.view({batchSize, nOutputPlane, outputHeight, outputWidth}); + + gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth}); + input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); + gradOffset = gradOffset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + offset = offset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + if (batch == 0) { + gradOutput = gradOutput.view({nOutputPlane, outputHeight, outputWidth}); + input = input.view({nInputPlane, inputHeight, inputWidth}); + gradInput = gradInput.view({nInputPlane, inputHeight, inputWidth}); + offset = offset.view({offset.size(1), offset.size(2), offset.size(3)}); + gradOffset = + gradOffset.view({offset.size(1), offset.size(2), offset.size(3)}); + } + + return 1; +} + +int deform_conv_backward_parameters_cuda( + at::Tensor input, at::Tensor offset, at::Tensor gradOutput, + at::Tensor gradWeight, // at::Tensor gradBias, + at::Tensor columns, at::Tensor ones, int kW, int kH, int dW, int dH, + int padW, int padH, int dilationW, int dilationH, int group, + int deformable_group, float scale, int im2col_step) { + // todo: transpose and reshape outGrad + // todo: reshape columns + // todo: add im2col_step as input + + shape_check(input, offset, &gradOutput, gradWeight, kH, kW, dH, dW, padH, + padW, dilationH, dilationW, group, deformable_group); + + input = input.contiguous(); + offset = offset.contiguous(); + gradOutput = gradOutput.contiguous(); + + int batch = 1; + + if (input.ndimension() == 3) { + // Force batch + batch = 0; + input = input.view( + at::IntList({1, input.size(0), input.size(1), input.size(2)})); + gradOutput = gradOutput.view( + {1, gradOutput.size(0), gradOutput.size(1), gradOutput.size(2)}); + } + + long batchSize = input.size(0); + long nInputPlane = input.size(1); + long inputHeight = input.size(2); + long inputWidth = input.size(3); + + long nOutputPlane = gradWeight.size(0); + + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + + AT_CHECK((offset.size(0) == batchSize), "invalid 
batch size of offset"); + + columns = at::zeros( + {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, + input.options()); + + gradOutput = gradOutput.view({batchSize / im2col_step, im2col_step, + nOutputPlane, outputHeight, outputWidth}); + gradOutput.transpose_(1, 2); + + at::Tensor gradOutputBuffer = at::zeros_like(gradOutput); + gradOutputBuffer = + gradOutputBuffer.view({batchSize / im2col_step, nOutputPlane, im2col_step, + outputHeight, outputWidth}); + gradOutputBuffer.copy_(gradOutput); + gradOutputBuffer = + gradOutputBuffer.view({batchSize / im2col_step, nOutputPlane, + im2col_step * outputHeight, outputWidth}); + + gradOutput.transpose_(1, 2); + gradOutput = + gradOutput.view({batchSize, nOutputPlane, outputHeight, outputWidth}); + + input = input.view({batchSize / im2col_step, im2col_step, nInputPlane, + inputHeight, inputWidth}); + offset = + offset.view({batchSize / im2col_step, im2col_step, + deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + for (int elt = 0; elt < batchSize / im2col_step; elt++) { + deformable_im2col(input[elt], offset[elt], nInputPlane, inputHeight, + inputWidth, kH, kW, padH, padW, dH, dW, dilationH, + dilationW, im2col_step, deformable_group, columns); + + // divide into group + gradOutputBuffer = gradOutputBuffer.view( + {gradOutputBuffer.size(0), group, gradOutputBuffer.size(1) / group, + gradOutputBuffer.size(2), gradOutputBuffer.size(3)}); + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + gradWeight = + gradWeight.view({group, gradWeight.size(0) / group, gradWeight.size(1), + gradWeight.size(2), gradWeight.size(3)}); + + for (int g = 0; g < group; g++) { + gradWeight[g] = gradWeight[g] + .flatten(1) + .addmm_(gradOutputBuffer[elt][g].flatten(1), + columns[g].transpose(1, 0), 1.0, scale) + .view_as(gradWeight[g]); + } + gradOutputBuffer = gradOutputBuffer.view( + {gradOutputBuffer.size(0), + gradOutputBuffer.size(1) * gradOutputBuffer.size(2), + gradOutputBuffer.size(3), gradOutputBuffer.size(4)}); + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + gradWeight = gradWeight.view({gradWeight.size(0) * gradWeight.size(1), + gradWeight.size(2), gradWeight.size(3), + gradWeight.size(4)}); + } + + input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); + offset = offset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + if (batch == 0) { + gradOutput = gradOutput.view({nOutputPlane, outputHeight, outputWidth}); + input = input.view({nInputPlane, inputHeight, inputWidth}); + } + + return 1; +} + +void modulated_deform_conv_cuda_forward( + at::Tensor input, at::Tensor weight, at::Tensor bias, at::Tensor ones, + at::Tensor offset, at::Tensor mask, at::Tensor output, at::Tensor columns, + int kernel_h, int kernel_w, const int stride_h, const int stride_w, + const int pad_h, const int pad_w, const int dilation_h, + const int dilation_w, const int group, const int deformable_group, + const bool with_bias) { + AT_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); + AT_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); + + const int batch = input.size(0); + const int channels = input.size(1); + const int height = input.size(2); + const int width = input.size(3); + + const int channels_out = weight.size(0); + const int channels_kernel = weight.size(1); + const int kernel_h_ = weight.size(2); + const int kernel_w_ = weight.size(3); + + if (kernel_h_ != kernel_h || kernel_w_ != kernel_w) + 
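+    // the spatial kernel size recorded in the weight tensor must match the kernel_h x kernel_w arguments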
AT_ERROR("Input shape and kernel shape wont match: (%d x %d vs %d x %d).", + kernel_h_, kernel_w, kernel_h_, kernel_w_); + if (channels != channels_kernel * group) + AT_ERROR("Input shape and kernel channels wont match: (%d vs %d).", + channels, channels_kernel * group); + + const int height_out = + (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; + const int width_out = + (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; + + if (ones.ndimension() != 2 || + ones.size(0) * ones.size(1) < height_out * width_out) { + // Resize plane and fill with ones... + ones = at::ones({height_out, width_out}, input.options()); + } + + // resize output + output = output.view({batch, channels_out, height_out, width_out}).zero_(); + // resize temporary columns + columns = + at::zeros({channels * kernel_h * kernel_w, 1 * height_out * width_out}, + input.options()); + + output = output.view({output.size(0), group, output.size(1) / group, + output.size(2), output.size(3)}); + + for (int b = 0; b < batch; b++) { + modulated_deformable_im2col_cuda( + input[b], offset[b], mask[b], 1, channels, height, width, height_out, + width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w, deformable_group, columns); + + // divide into group + weight = weight.view({group, weight.size(0) / group, weight.size(1), + weight.size(2), weight.size(3)}); + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + + for (int g = 0; g < group; g++) { + output[b][g] = output[b][g] + .flatten(1) + .addmm_(weight[g].flatten(1), columns[g]) + .view_as(output[b][g]); + } + + weight = weight.view({weight.size(0) * weight.size(1), weight.size(2), + weight.size(3), weight.size(4)}); + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + } + + output = output.view({output.size(0), output.size(1) * output.size(2), + output.size(3), output.size(4)}); + + if (with_bias) { + output += bias.view({1, bias.size(0), 1, 1}); + } +} + +void modulated_deform_conv_cuda_backward( + at::Tensor input, at::Tensor weight, at::Tensor bias, at::Tensor ones, + at::Tensor offset, at::Tensor mask, at::Tensor columns, + at::Tensor grad_input, at::Tensor grad_weight, at::Tensor grad_bias, + at::Tensor grad_offset, at::Tensor grad_mask, at::Tensor grad_output, + int kernel_h, int kernel_w, int stride_h, int stride_w, int pad_h, + int pad_w, int dilation_h, int dilation_w, int group, int deformable_group, + const bool with_bias) { + AT_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); + AT_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); + + const int batch = input.size(0); + const int channels = input.size(1); + const int height = input.size(2); + const int width = input.size(3); + + const int channels_kernel = weight.size(1); + const int kernel_h_ = weight.size(2); + const int kernel_w_ = weight.size(3); + if (kernel_h_ != kernel_h || kernel_w_ != kernel_w) + AT_ERROR("Input shape and kernel shape wont match: (%d x %d vs %d x %d).", + kernel_h_, kernel_w, kernel_h_, kernel_w_); + if (channels != channels_kernel * group) + AT_ERROR("Input shape and kernel channels wont match: (%d vs %d).", + channels, channels_kernel * group); + + const int height_out = + (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; + const int width_out = + (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; + + if (ones.ndimension() != 2 || + ones.size(0) * ones.size(1) < height_out * 
width_out) { + // Resize plane and fill with ones... + ones = at::ones({height_out, width_out}, input.options()); + } + + grad_input = grad_input.view({batch, channels, height, width}); + columns = at::zeros({channels * kernel_h * kernel_w, height_out * width_out}, + input.options()); + + grad_output = + grad_output.view({grad_output.size(0), group, grad_output.size(1) / group, + grad_output.size(2), grad_output.size(3)}); + + for (int b = 0; b < batch; b++) { + // divide int group + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + weight = weight.view({group, weight.size(0) / group, weight.size(1), + weight.size(2), weight.size(3)}); + + for (int g = 0; g < group; g++) { + columns[g].addmm_(weight[g].flatten(1).transpose(0, 1), + grad_output[b][g].flatten(1), 0.0f, 1.0f); + } + + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + weight = weight.view({weight.size(0) * weight.size(1), weight.size(2), + weight.size(3), weight.size(4)}); + + // gradient w.r.t. input coordinate data + modulated_deformable_col2im_coord_cuda( + columns, input[b], offset[b], mask[b], 1, channels, height, width, + height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, + stride_w, dilation_h, dilation_w, deformable_group, grad_offset[b], + grad_mask[b]); + // gradient w.r.t. input data + modulated_deformable_col2im_cuda( + columns, offset[b], mask[b], 1, channels, height, width, height_out, + width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w, deformable_group, grad_input[b]); + + // gradient w.r.t. weight, dWeight should accumulate across the batch and + // group + modulated_deformable_im2col_cuda( + input[b], offset[b], mask[b], 1, channels, height, width, height_out, + width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w, deformable_group, columns); + + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + grad_weight = grad_weight.view({group, grad_weight.size(0) / group, + grad_weight.size(1), grad_weight.size(2), + grad_weight.size(3)}); + if (with_bias) + grad_bias = grad_bias.view({group, grad_bias.size(0) / group}); + + for (int g = 0; g < group; g++) { + grad_weight[g] = + grad_weight[g] + .flatten(1) + .addmm_(grad_output[b][g].flatten(1), columns[g].transpose(0, 1)) + .view_as(grad_weight[g]); + if (with_bias) { + grad_bias[g] = + grad_bias[g] + .view({-1, 1}) + .addmm_(grad_output[b][g].flatten(1), ones.view({-1, 1})) + .view(-1); + } + } + + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + grad_weight = grad_weight.view({grad_weight.size(0) * grad_weight.size(1), + grad_weight.size(2), grad_weight.size(3), + grad_weight.size(4)}); + if (with_bias) + grad_bias = grad_bias.view({grad_bias.size(0) * grad_bias.size(1)}); + } + grad_output = grad_output.view({grad_output.size(0) * grad_output.size(1), + grad_output.size(2), grad_output.size(3), + grad_output.size(4)}); +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("deform_conv_forward_cuda", &deform_conv_forward_cuda, + "deform forward (CUDA)"); + m.def("deform_conv_backward_input_cuda", &deform_conv_backward_input_cuda, + "deform_conv_backward_input (CUDA)"); + m.def("deform_conv_backward_parameters_cuda", + &deform_conv_backward_parameters_cuda, + "deform_conv_backward_parameters (CUDA)"); + m.def("modulated_deform_conv_cuda_forward", + &modulated_deform_conv_cuda_forward, + "modulated deform conv forward (CUDA)"); + 
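+  // modulated deformable conv (DCNv2) backward: produces gradients w.r.t. input, offset, mask, weight and (optionally) bias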
m.def("modulated_deform_conv_cuda_backward", + &modulated_deform_conv_cuda_backward, + "modulated deform conv backward (CUDA)"); +} diff --git a/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu b/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu new file mode 100644 index 0000000..fd56016 --- /dev/null +++ b/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu @@ -0,0 +1,866 @@ +/*! + ******************* BEGIN Caffe Copyright Notice and Disclaimer **************** + * + * COPYRIGHT + * + * All contributions by the University of California: + * Copyright (c) 2014-2017 The Regents of the University of California (Regents) + * All rights reserved. + * + * All other contributions: + * Copyright (c) 2014-2017, the respective contributors + * All rights reserved. + * + * Caffe uses a shared copyright model: each contributor holds copyright over + * their contributions to Caffe. The project versioning records all such + * contribution and copyright details. If a contributor wants to further mark + * their specific copyright on a particular contribution, they should indicate + * their copyright solely in the commit message of the change when it is + * committed. + * + * LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * CONTRIBUTION AGREEMENT + * + * By contributing to the BVLC/caffe repository through pull-request, comment, + * or otherwise, the contributor releases their content to the + * license and copyright terms herein. + * + ***************** END Caffe Copyright Notice and Disclaimer ******************** + * + * Copyright (c) 2018 Microsoft + * Licensed under The MIT License [see LICENSE for details] + * \file modulated_deformable_im2col.cuh + * \brief Function definitions of converting an image to + * column matrix based on kernel, padding, dilation, and offset. + * These functions are mainly used in deformable convolution operators. 
+ * \ref: https://arxiv.org/abs/1703.06211 + * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng + */ + +// modify from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu + +#include +#include +#include +#include +#include + +using namespace at; + +#define CUDA_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ + i += blockDim.x * gridDim.x) + +const int CUDA_NUM_THREADS = 1024; +const int kMaxGridNum = 65535; + +inline int GET_BLOCKS(const int N) +{ + return std::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS); +} + +template +__device__ scalar_t deformable_im2col_bilinear(const scalar_t *bottom_data, const int data_width, + const int height, const int width, scalar_t h, scalar_t w) +{ + + int h_low = floor(h); + int w_low = floor(w); + int h_high = h_low + 1; + int w_high = w_low + 1; + + scalar_t lh = h - h_low; + scalar_t lw = w - w_low; + scalar_t hh = 1 - lh, hw = 1 - lw; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + v1 = bottom_data[h_low * data_width + w_low]; + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + v2 = bottom_data[h_low * data_width + w_high]; + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + v3 = bottom_data[h_high * data_width + w_low]; + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + v4 = bottom_data[h_high * data_width + w_high]; + + scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + +template +__device__ scalar_t get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w, + const int h, const int w, const int height, const int width) +{ + + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) + { + //empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + if (h == argmax_h_low && w == argmax_w_low) + weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); + if (h == argmax_h_low && w == argmax_w_high) + weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); + if (h == argmax_h_high && w == argmax_w_low) + weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); + if (h == argmax_h_high && w == argmax_w_high) + weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); + return weight; +} + +template +__device__ scalar_t get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w, + const int height, const int width, const scalar_t *im_data, + const int data_width, const int bp_dir) +{ + + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) + { + //empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + + if (bp_dir == 0) + { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += 
(argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; + } + else if (bp_dir == 1) + { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; + } + + return weight; +} + +template +__global__ void deformable_im2col_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, const int channel_per_deformable_group, + const int batch_size, const int num_channels, const int deformable_group, + const int height_col, const int width_col, + scalar_t *data_col) +{ + CUDA_KERNEL_LOOP(index, n) + { + // index index of output matrix + const int w_col = index % width_col; + const int h_col = (index / width_col) % height_col; + const int b_col = (index / width_col / height_col) % batch_size; + const int c_im = (index / width_col / height_col) / batch_size; + const int c_col = c_im * kernel_h * kernel_w; + + // compute deformable group index + const int deformable_group_index = c_im / channel_per_deformable_group; + + const int h_in = h_col * stride_h - pad_h; + const int w_in = w_col * stride_w - pad_w; + scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; + //const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in; + const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; + const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; + + for (int i = 0; i < kernel_h; ++i) + { + for (int j = 0; j < kernel_w; ++j) + { + const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; + const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + scalar_t val = static_cast(0); + const scalar_t h_im = h_in + i * dilation_h + offset_h; + const scalar_t w_im = w_in + j * dilation_w + offset_w; + if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) + { + //const scalar_t map_h = i * dilation_h + offset_h; + //const scalar_t map_w = j * dilation_w + offset_w; + //const int cur_height = height - h_in; + //const int cur_width = width - w_in; + //val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); + val = deformable_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); + } + *data_col_ptr = val; + data_col_ptr += batch_size * height_col * width_col; + } + } + } +} + +void deformable_im2col( + const at::Tensor data_im, const at::Tensor data_offset, const int channels, + const int height, const int width, const int ksize_h, const int ksize_w, + const 
int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, const int parallel_imgs, + const int deformable_group, at::Tensor data_col) +{ + // num_axes should be smaller than block size + // todo: check parallel_imgs is correctly passed in + int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; + int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; + int num_kernels = channels * height_col * width_col * parallel_imgs; + int channel_per_deformable_group = channels / deformable_group; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_im.type(), "deformable_im2col_gpu", ([&] { + const scalar_t *data_im_ = data_im.data(); + const scalar_t *data_offset_ = data_offset.data(); + scalar_t *data_col_ = data_col.data(); + + deformable_im2col_gpu_kernel<<>>( + num_kernels, data_im_, data_offset_, height, width, ksize_h, ksize_w, + pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, + channel_per_deformable_group, parallel_imgs, channels, deformable_group, + height_col, width_col, data_col_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in deformable_im2col: %s\n", cudaGetErrorString(err)); + } +} + +template +__global__ void deformable_col2im_gpu_kernel( + const int n, const scalar_t *data_col, const scalar_t *data_offset, + const int channels, const int height, const int width, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, const int deformable_group, + const int height_col, const int width_col, + scalar_t *grad_im) +{ + CUDA_KERNEL_LOOP(index, n) + { + const int j = (index / width_col / height_col / batch_size) % kernel_w; + const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; + const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; + // compute the start and end of the output + + const int deformable_group_index = c / channel_per_deformable_group; + + int w_out = index % width_col; + int h_out = (index / width_col) % height_col; + int b = (index / width_col / height_col) % batch_size; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + + const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * + 2 * kernel_h * kernel_w * height_col * width_col; + const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; + const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; + const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; + + const scalar_t cur_top_grad = data_col[index]; + const int cur_h = (int)cur_inv_h_data; + const int cur_w = (int)cur_inv_w_data; + for (int dy = -2; dy <= 2; dy++) + { + for (int dx = -2; dx <= 2; dx++) + { + if (cur_h + dy >= 0 && cur_h + dy < height && + cur_w + dx >= 0 && cur_w + dx < width && + abs(cur_inv_h_data - (cur_h + dy)) < 1 && + abs(cur_inv_w_data - (cur_w + dx)) < 1) + { + int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; + scalar_t weight = 
get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); + atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); + } + } + } + } +} + +void deformable_col2im( + const at::Tensor data_col, const at::Tensor data_offset, const int channels, + const int height, const int width, const int ksize_h, + const int ksize_w, const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int parallel_imgs, const int deformable_group, + at::Tensor grad_im) +{ + + // todo: make sure parallel_imgs is passed in correctly + int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; + int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; + int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs; + int channel_per_deformable_group = channels / deformable_group; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.type(), "deformable_col2im_gpu", ([&] { + const scalar_t *data_col_ = data_col.data(); + const scalar_t *data_offset_ = data_offset.data(); + scalar_t *grad_im_ = grad_im.data(); + + deformable_col2im_gpu_kernel<<>>( + num_kernels, data_col_, data_offset_, channels, height, width, ksize_h, + ksize_w, pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w, channel_per_deformable_group, + parallel_imgs, deformable_group, height_col, width_col, grad_im_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in deformable_col2im: %s\n", cudaGetErrorString(err)); + } +} + +template +__global__ void deformable_col2im_coord_gpu_kernel(const int n, const scalar_t *data_col, + const scalar_t *data_im, const scalar_t *data_offset, + const int channels, const int height, const int width, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, const int offset_channels, const int deformable_group, + const int height_col, const int width_col, scalar_t *grad_offset) +{ + CUDA_KERNEL_LOOP(index, n) + { + scalar_t val = 0; + int w = index % width_col; + int h = (index / width_col) % height_col; + int c = (index / width_col / height_col) % offset_channels; + int b = (index / width_col / height_col) / offset_channels; + // compute the start and end of the output + + const int deformable_group_index = c / (2 * kernel_h * kernel_w); + const int col_step = kernel_h * kernel_w; + int cnt = 0; + const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * + batch_size * width_col * height_col; + const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * + channel_per_deformable_group / kernel_h / kernel_w * height * width; + const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * + kernel_h * kernel_w * height_col * width_col; + + const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; + + for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) + { + const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; + const int bp_dir = offset_c % 2; + + int j = (col_pos / width_col / height_col / batch_size) % kernel_w; + int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; + int w_out = col_pos 
% width_col; + int h_out = (col_pos / width_col) % height_col; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); + const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + scalar_t inv_h = h_in + i * dilation_h + offset_h; + scalar_t inv_w = w_in + j * dilation_w + offset_w; + if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) + { + inv_h = inv_w = -2; + } + const scalar_t weight = get_coordinate_weight( + inv_h, inv_w, + height, width, data_im_ptr + cnt * height * width, width, bp_dir); + val += weight * data_col_ptr[col_pos]; + cnt += 1; + } + + grad_offset[index] = val; + } +} + +void deformable_col2im_coord( + const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, + const int channels, const int height, const int width, const int ksize_h, + const int ksize_w, const int pad_h, const int pad_w, const int stride_h, + const int stride_w, const int dilation_h, const int dilation_w, + const int parallel_imgs, const int deformable_group, at::Tensor grad_offset) +{ + + int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; + int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; + int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs; + int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.type(), "deformable_col2im_coord_gpu", ([&] { + const scalar_t *data_col_ = data_col.data(); + const scalar_t *data_im_ = data_im.data(); + const scalar_t *data_offset_ = data_offset.data(); + scalar_t *grad_offset_ = grad_offset.data(); + + deformable_col2im_coord_gpu_kernel<<>>( + num_kernels, data_col_, data_im_, data_offset_, channels, height, width, + ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w, channel_per_deformable_group, + parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group, + height_col, width_col, grad_offset_); + })); +} + +template +__device__ scalar_t dmcn_im2col_bilinear(const scalar_t *bottom_data, const int data_width, + const int height, const int width, scalar_t h, scalar_t w) +{ + int h_low = floor(h); + int w_low = floor(w); + int h_high = h_low + 1; + int w_high = w_low + 1; + + scalar_t lh = h - h_low; + scalar_t lw = w - w_low; + scalar_t hh = 1 - lh, hw = 1 - lw; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + v1 = bottom_data[h_low * data_width + w_low]; + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + v2 = bottom_data[h_low * data_width + w_high]; + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + v3 = bottom_data[h_high * data_width + w_low]; + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + v4 = bottom_data[h_high * data_width + w_high]; + + scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + +template +__device__ scalar_t dmcn_get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w, + const int h, const int w, const int height, const int width) +{ + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) + { + //empty 
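+    // sampling location falls outside the feature map, so it contributes zero gradient weight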
+ return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + if (h == argmax_h_low && w == argmax_w_low) + weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); + if (h == argmax_h_low && w == argmax_w_high) + weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); + if (h == argmax_h_high && w == argmax_w_low) + weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); + if (h == argmax_h_high && w == argmax_w_high) + weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); + return weight; +} + +template +__device__ scalar_t dmcn_get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w, + const int height, const int width, const scalar_t *im_data, + const int data_width, const int bp_dir) +{ + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) + { + //empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + + if (bp_dir == 0) + { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; + } + else if (bp_dir == 1) + { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; + } + + return weight; +} + +template +__global__ void modulated_deformable_im2col_gpu_kernel(const int n, + const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, const int num_channels, const int deformable_group, + const int height_col, const int width_col, + scalar_t *data_col) +{ + CUDA_KERNEL_LOOP(index, n) + { + // index index of output matrix + const int w_col = index % width_col; + const int h_col = (index / width_col) % height_col; + const int b_col = (index / width_col / height_col) % batch_size; + const int c_im = (index / width_col / height_col) / batch_size; + const int c_col = c_im * kernel_h * kernel_w; + + // compute deformable group index + const int deformable_group_index = c_im / channel_per_deformable_group; + + const int h_in = h_col * stride_h - pad_h; + const int w_in = w_col * stride_w - pad_w; + + scalar_t *data_col_ptr = data_col + 
((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; + //const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in; + const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; + const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; + + const scalar_t *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; + + for (int i = 0; i < kernel_h; ++i) + { + for (int j = 0; j < kernel_w; ++j) + { + const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; + const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; + const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; + scalar_t val = static_cast(0); + const scalar_t h_im = h_in + i * dilation_h + offset_h; + const scalar_t w_im = w_in + j * dilation_w + offset_w; + //if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { + if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) + { + //const float map_h = i * dilation_h + offset_h; + //const float map_w = j * dilation_w + offset_w; + //const int cur_height = height - h_in; + //const int cur_width = width - w_in; + //val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); + val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); + } + *data_col_ptr = val * mask; + data_col_ptr += batch_size * height_col * width_col; + //data_col_ptr += height_col * width_col; + } + } + } +} + +template +__global__ void modulated_deformable_col2im_gpu_kernel(const int n, + const scalar_t *data_col, const scalar_t *data_offset, const scalar_t *data_mask, + const int channels, const int height, const int width, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, const int deformable_group, + const int height_col, const int width_col, + scalar_t *grad_im) +{ + CUDA_KERNEL_LOOP(index, n) + { + const int j = (index / width_col / height_col / batch_size) % kernel_w; + const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; + const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; + // compute the start and end of the output + + const int deformable_group_index = c / channel_per_deformable_group; + + int w_out = index % width_col; + int h_out = (index / width_col) % height_col; + int b = (index / width_col / height_col) % batch_size; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + + const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; + const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; + const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; + const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + 
h_out) * width_col + w_out; + const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; + const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; + const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; + + const scalar_t cur_top_grad = data_col[index] * mask; + const int cur_h = (int)cur_inv_h_data; + const int cur_w = (int)cur_inv_w_data; + for (int dy = -2; dy <= 2; dy++) + { + for (int dx = -2; dx <= 2; dx++) + { + if (cur_h + dy >= 0 && cur_h + dy < height && + cur_w + dx >= 0 && cur_w + dx < width && + abs(cur_inv_h_data - (cur_h + dy)) < 1 && + abs(cur_inv_w_data - (cur_w + dx)) < 1) + { + int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; + scalar_t weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); + atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); + } + } + } + } +} + +template +__global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n, + const scalar_t *data_col, const scalar_t *data_im, + const scalar_t *data_offset, const scalar_t *data_mask, + const int channels, const int height, const int width, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, const int offset_channels, const int deformable_group, + const int height_col, const int width_col, + scalar_t *grad_offset, scalar_t *grad_mask) +{ + CUDA_KERNEL_LOOP(index, n) + { + scalar_t val = 0, mval = 0; + int w = index % width_col; + int h = (index / width_col) % height_col; + int c = (index / width_col / height_col) % offset_channels; + int b = (index / width_col / height_col) / offset_channels; + // compute the start and end of the output + + const int deformable_group_index = c / (2 * kernel_h * kernel_w); + const int col_step = kernel_h * kernel_w; + int cnt = 0; + const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; + const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; + const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; + const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; + + const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; + + for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) + { + const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; + const int bp_dir = offset_c % 2; + + int j = (col_pos / width_col / height_col / batch_size) % kernel_w; + int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; + int w_out = col_pos % width_col; + int h_out = (col_pos / width_col) % height_col; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); + const int data_offset_w_ptr = (((2 * 
(i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); + const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out); + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; + scalar_t inv_h = h_in + i * dilation_h + offset_h; + scalar_t inv_w = w_in + j * dilation_w + offset_w; + if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) + { + inv_h = inv_w = -2; + } + else + { + mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w); + } + const scalar_t weight = dmcn_get_coordinate_weight( + inv_h, inv_w, + height, width, data_im_ptr + cnt * height * width, width, bp_dir); + val += weight * data_col_ptr[col_pos] * mask; + cnt += 1; + } + // KERNEL_ASSIGN(grad_offset[index], offset_req, val); + grad_offset[index] = val; + if (offset_c % 2 == 0) + // KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval); + grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval; + } +} + +void modulated_deformable_im2col_cuda( + const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, + const int batch_size, const int channels, const int height_im, const int width_im, + const int height_col, const int width_col, const int kernel_h, const int kenerl_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int deformable_group, at::Tensor data_col) +{ + // num_axes should be smaller than block size + const int channel_per_deformable_group = channels / deformable_group; + const int num_kernels = channels * batch_size * height_col * width_col; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_im.type(), "modulated_deformable_im2col_gpu", ([&] { + const scalar_t *data_im_ = data_im.data(); + const scalar_t *data_offset_ = data_offset.data(); + const scalar_t *data_mask_ = data_mask.data(); + scalar_t *data_col_ = data_col.data(); + + modulated_deformable_im2col_gpu_kernel<<>>( + num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kenerl_w, + pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, + batch_size, channels, deformable_group, height_col, width_col, data_col_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in modulated_deformable_im2col_cuda: %s\n", cudaGetErrorString(err)); + } +} + +void modulated_deformable_col2im_cuda( + const at::Tensor data_col, const at::Tensor data_offset, const at::Tensor data_mask, + const int batch_size, const int channels, const int height_im, const int width_im, + const int height_col, const int width_col, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int deformable_group, at::Tensor grad_im) +{ + + const int channel_per_deformable_group = channels / deformable_group; + const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.type(), "modulated_deformable_col2im_gpu", ([&] { + const scalar_t *data_col_ = 
data_col.data(); + const scalar_t *data_offset_ = data_offset.data(); + const scalar_t *data_mask_ = data_mask.data(); + scalar_t *grad_im_ = grad_im.data(); + + modulated_deformable_col2im_gpu_kernel<<>>( + num_kernels, data_col_, data_offset_, data_mask_, channels, height_im, width_im, + kernel_h, kernel_w, pad_h, pad_h, stride_h, stride_w, + dilation_h, dilation_w, channel_per_deformable_group, + batch_size, deformable_group, height_col, width_col, grad_im_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in modulated_deformable_col2im_cuda: %s\n", cudaGetErrorString(err)); + } +} + +void modulated_deformable_col2im_coord_cuda( + const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, + const int batch_size, const int channels, const int height_im, const int width_im, + const int height_col, const int width_col, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int deformable_group, + at::Tensor grad_offset, at::Tensor grad_mask) +{ + const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group; + const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.type(), "modulated_deformable_col2im_coord_gpu", ([&] { + const scalar_t *data_col_ = data_col.data(); + const scalar_t *data_im_ = data_im.data(); + const scalar_t *data_offset_ = data_offset.data(); + const scalar_t *data_mask_ = data_mask.data(); + scalar_t *grad_offset_ = grad_offset.data(); + scalar_t *grad_mask_ = grad_mask.data(); + + modulated_deformable_col2im_coord_gpu_kernel<<>>( + num_kernels, data_col_, data_im_, data_offset_, data_mask_, channels, height_im, width_im, + kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w, channel_per_deformable_group, + batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col, + grad_offset_, grad_mask_); + })); + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in modulated_deformable_col2im_coord_cuda: %s\n", cudaGetErrorString(err)); + } +} diff --git a/mmdet/ops/dcn/src/deform_pool_cuda.cpp b/mmdet/ops/dcn/src/deform_pool_cuda.cpp new file mode 100644 index 0000000..803d5f1 --- /dev/null +++ b/mmdet/ops/dcn/src/deform_pool_cuda.cpp @@ -0,0 +1,87 @@ +// modify from +// https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/modulated_dcn_cuda.c + +// based on +// author: Charles Shang +// https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu + +#include + +#include +#include + +void DeformablePSROIPoolForward( + const at::Tensor data, const at::Tensor bbox, const at::Tensor trans, + at::Tensor out, at::Tensor top_count, const int batch, const int channels, + const int height, const int width, const int num_bbox, + const int channels_trans, const int no_trans, const float spatial_scale, + const int output_dim, const int group_size, const int pooled_size, + const int part_size, const int sample_per_part, const float trans_std); + +void DeformablePSROIPoolBackwardAcc( + const at::Tensor out_grad, const at::Tensor data, const at::Tensor bbox, + const at::Tensor trans, const at::Tensor top_count, at::Tensor in_grad, + at::Tensor trans_grad, const int batch, const int 
channels, + const int height, const int width, const int num_bbox, + const int channels_trans, const int no_trans, const float spatial_scale, + const int output_dim, const int group_size, const int pooled_size, + const int part_size, const int sample_per_part, const float trans_std); + +void deform_psroi_pooling_cuda_forward( + at::Tensor input, at::Tensor bbox, at::Tensor trans, at::Tensor out, + at::Tensor top_count, const int no_trans, const float spatial_scale, + const int output_dim, const int group_size, const int pooled_size, + const int part_size, const int sample_per_part, const float trans_std) { + AT_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); + + const int batch = input.size(0); + const int channels = input.size(1); + const int height = input.size(2); + const int width = input.size(3); + const int channels_trans = no_trans ? 2 : trans.size(1); + + const int num_bbox = bbox.size(0); + if (num_bbox != out.size(0)) + AT_ERROR("Output shape and bbox number wont match: (%d vs %d).", + out.size(0), num_bbox); + + DeformablePSROIPoolForward( + input, bbox, trans, out, top_count, batch, channels, height, width, + num_bbox, channels_trans, no_trans, spatial_scale, output_dim, group_size, + pooled_size, part_size, sample_per_part, trans_std); +} + +void deform_psroi_pooling_cuda_backward( + at::Tensor out_grad, at::Tensor input, at::Tensor bbox, at::Tensor trans, + at::Tensor top_count, at::Tensor input_grad, at::Tensor trans_grad, + const int no_trans, const float spatial_scale, const int output_dim, + const int group_size, const int pooled_size, const int part_size, + const int sample_per_part, const float trans_std) { + AT_CHECK(out_grad.is_contiguous(), "out_grad tensor has to be contiguous"); + AT_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); + + const int batch = input.size(0); + const int channels = input.size(1); + const int height = input.size(2); + const int width = input.size(3); + const int channels_trans = no_trans ? 2 : trans.size(1); + + const int num_bbox = bbox.size(0); + if (num_bbox != out_grad.size(0)) + AT_ERROR("Output shape and bbox number wont match: (%d vs %d).", + out_grad.size(0), num_bbox); + + DeformablePSROIPoolBackwardAcc( + out_grad, input, bbox, trans, top_count, input_grad, trans_grad, batch, + channels, height, width, num_bbox, channels_trans, no_trans, + spatial_scale, output_dim, group_size, pooled_size, part_size, + sample_per_part, trans_std); +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("deform_psroi_pooling_cuda_forward", &deform_psroi_pooling_cuda_forward, + "deform psroi pooling forward(CUDA)"); + m.def("deform_psroi_pooling_cuda_backward", + &deform_psroi_pooling_cuda_backward, + "deform psroi pooling backward(CUDA)"); +} \ No newline at end of file diff --git a/mmdet/ops/dcn/src/deform_pool_cuda_kernel.cu b/mmdet/ops/dcn/src/deform_pool_cuda_kernel.cu new file mode 100644 index 0000000..e494460 --- /dev/null +++ b/mmdet/ops/dcn/src/deform_pool_cuda_kernel.cu @@ -0,0 +1,364 @@ +/*! 
+ * Copyright (c) 2017 Microsoft + * Licensed under The MIT License [see LICENSE for details] + * \file deformable_psroi_pooling.cu + * \brief + * \author Yi Li, Guodong Zhang, Jifeng Dai +*/ +/***************** Adapted by Charles Shang *********************/ +// modify from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/cuda/deform_psroi_pooling_cuda.cu + +#include +#include +#include +#include +#include + +using namespace at; + +#define CUDA_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ + i < (n); \ + i += blockDim.x * gridDim.x) + +const int CUDA_NUM_THREADS = 1024; +inline int GET_BLOCKS(const int N) +{ + return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; +} + +template +__device__ scalar_t bilinear_interp( + const scalar_t *data, + const scalar_t x, + const scalar_t y, + const int width, + const int height) +{ + int x1 = floor(x); + int x2 = ceil(x); + int y1 = floor(y); + int y2 = ceil(y); + scalar_t dist_x = (scalar_t)(x - x1); + scalar_t dist_y = (scalar_t)(y - y1); + scalar_t value11 = data[y1 * width + x1]; + scalar_t value12 = data[y2 * width + x1]; + scalar_t value21 = data[y1 * width + x2]; + scalar_t value22 = data[y2 * width + x2]; + scalar_t value = (1 - dist_x) * (1 - dist_y) * value11 + (1 - dist_x) * dist_y * value12 + dist_x * (1 - dist_y) * value21 + dist_x * dist_y * value22; + return value; +} + +template +__global__ void DeformablePSROIPoolForwardKernel( + const int count, + const scalar_t *bottom_data, + const scalar_t spatial_scale, + const int channels, + const int height, const int width, + const int pooled_height, const int pooled_width, + const scalar_t *bottom_rois, const scalar_t *bottom_trans, + const int no_trans, + const scalar_t trans_std, + const int sample_per_part, + const int output_dim, + const int group_size, + const int part_size, + const int num_classes, + const int channels_each_class, + scalar_t *top_data, + scalar_t *top_count) +{ + CUDA_KERNEL_LOOP(index, count) + { + // The output is in order (n, ctop, ph, pw) + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int ctop = (index / pooled_width / pooled_height) % output_dim; + int n = index / pooled_width / pooled_height / output_dim; + + // [start, end) interval for spatial sampling + const scalar_t *offset_bottom_rois = bottom_rois + n * 5; + int roi_batch_ind = offset_bottom_rois[0]; + scalar_t roi_start_w = (scalar_t)(round(offset_bottom_rois[1])) * spatial_scale - 0.5; + scalar_t roi_start_h = (scalar_t)(round(offset_bottom_rois[2])) * spatial_scale - 0.5; + scalar_t roi_end_w = (scalar_t)(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5; + scalar_t roi_end_h = (scalar_t)(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5; + + // Force too small ROIs to be 1x1 + scalar_t roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 + scalar_t roi_height = max(roi_end_h - roi_start_h, 0.1); + + // Compute w and h at bottom + scalar_t bin_size_h = roi_height / (scalar_t)(pooled_height); + scalar_t bin_size_w = roi_width / (scalar_t)(pooled_width); + + scalar_t sub_bin_size_h = bin_size_h / (scalar_t)(sample_per_part); + scalar_t sub_bin_size_w = bin_size_w / (scalar_t)(sample_per_part); + + int part_h = floor((scalar_t)(ph) / pooled_height * part_size); + int part_w = floor((scalar_t)(pw) / pooled_width * part_size); + int class_id = ctop / channels_each_class; + scalar_t trans_x = no_trans ? 
(scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std; + scalar_t trans_y = no_trans ? (scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std; + + scalar_t wstart = (scalar_t)(pw)*bin_size_w + roi_start_w; + wstart += trans_x * roi_width; + scalar_t hstart = (scalar_t)(ph)*bin_size_h + roi_start_h; + hstart += trans_y * roi_height; + + scalar_t sum = 0; + int count = 0; + int gw = floor((scalar_t)(pw)*group_size / pooled_width); + int gh = floor((scalar_t)(ph)*group_size / pooled_height); + gw = min(max(gw, 0), group_size - 1); + gh = min(max(gh, 0), group_size - 1); + + const scalar_t *offset_bottom_data = bottom_data + (roi_batch_ind * channels) * height * width; + for (int ih = 0; ih < sample_per_part; ih++) + { + for (int iw = 0; iw < sample_per_part; iw++) + { + scalar_t w = wstart + iw * sub_bin_size_w; + scalar_t h = hstart + ih * sub_bin_size_h; + // bilinear interpolation + if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) + { + continue; + } + w = min(max(w, 0.), width - 1.); + h = min(max(h, 0.), height - 1.); + int c = (ctop * group_size + gh) * group_size + gw; + scalar_t val = bilinear_interp(offset_bottom_data + c * height * width, w, h, width, height); + sum += val; + count++; + } + } + top_data[index] = count == 0 ? (scalar_t)(0) : sum / count; + top_count[index] = count; + } +} + +template +__global__ void DeformablePSROIPoolBackwardAccKernel( + const int count, + const scalar_t *top_diff, + const scalar_t *top_count, + const int num_rois, + const scalar_t spatial_scale, + const int channels, + const int height, const int width, + const int pooled_height, const int pooled_width, + const int output_dim, + scalar_t *bottom_data_diff, scalar_t *bottom_trans_diff, + const scalar_t *bottom_data, + const scalar_t *bottom_rois, + const scalar_t *bottom_trans, + const int no_trans, + const scalar_t trans_std, + const int sample_per_part, + const int group_size, + const int part_size, + const int num_classes, + const int channels_each_class) +{ + CUDA_KERNEL_LOOP(index, count) + { + // The output is in order (n, ctop, ph, pw) + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int ctop = (index / pooled_width / pooled_height) % output_dim; + int n = index / pooled_width / pooled_height / output_dim; + + // [start, end) interval for spatial sampling + const scalar_t *offset_bottom_rois = bottom_rois + n * 5; + int roi_batch_ind = offset_bottom_rois[0]; + scalar_t roi_start_w = (scalar_t)(round(offset_bottom_rois[1])) * spatial_scale - 0.5; + scalar_t roi_start_h = (scalar_t)(round(offset_bottom_rois[2])) * spatial_scale - 0.5; + scalar_t roi_end_w = (scalar_t)(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5; + scalar_t roi_end_h = (scalar_t)(round(offset_bottom_rois[4]) + 1.) 
* spatial_scale - 0.5; + + // Force too small ROIs to be 1x1 + scalar_t roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 + scalar_t roi_height = max(roi_end_h - roi_start_h, 0.1); + + // Compute w and h at bottom + scalar_t bin_size_h = roi_height / (scalar_t)(pooled_height); + scalar_t bin_size_w = roi_width / (scalar_t)(pooled_width); + + scalar_t sub_bin_size_h = bin_size_h / (scalar_t)(sample_per_part); + scalar_t sub_bin_size_w = bin_size_w / (scalar_t)(sample_per_part); + + int part_h = floor((scalar_t)(ph) / pooled_height * part_size); + int part_w = floor((scalar_t)(pw) / pooled_width * part_size); + int class_id = ctop / channels_each_class; + scalar_t trans_x = no_trans ? (scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std; + scalar_t trans_y = no_trans ? (scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std; + + scalar_t wstart = (scalar_t)(pw)*bin_size_w + roi_start_w; + wstart += trans_x * roi_width; + scalar_t hstart = (scalar_t)(ph)*bin_size_h + roi_start_h; + hstart += trans_y * roi_height; + + if (top_count[index] <= 0) + { + continue; + } + scalar_t diff_val = top_diff[index] / top_count[index]; + const scalar_t *offset_bottom_data = bottom_data + roi_batch_ind * channels * height * width; + scalar_t *offset_bottom_data_diff = bottom_data_diff + roi_batch_ind * channels * height * width; + int gw = floor((scalar_t)(pw)*group_size / pooled_width); + int gh = floor((scalar_t)(ph)*group_size / pooled_height); + gw = min(max(gw, 0), group_size - 1); + gh = min(max(gh, 0), group_size - 1); + + for (int ih = 0; ih < sample_per_part; ih++) + { + for (int iw = 0; iw < sample_per_part; iw++) + { + scalar_t w = wstart + iw * sub_bin_size_w; + scalar_t h = hstart + ih * sub_bin_size_h; + // bilinear interpolation + if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) + { + continue; + } + w = min(max(w, 0.), width - 1.); + h = min(max(h, 0.), height - 1.); + int c = (ctop * group_size + gh) * group_size + gw; + // backward on feature + int x0 = floor(w); + int x1 = ceil(w); + int y0 = floor(h); + int y1 = ceil(h); + scalar_t dist_x = w - x0, dist_y = h - y0; + scalar_t q00 = (1 - dist_x) * (1 - dist_y); + scalar_t q01 = (1 - dist_x) * dist_y; + scalar_t q10 = dist_x * (1 - dist_y); + scalar_t q11 = dist_x * dist_y; + int bottom_index_base = c * height * width; + atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x0, q00 * diff_val); + atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x0, q01 * diff_val); + atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x1, q10 * diff_val); + atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x1, q11 * diff_val); + + if (no_trans) + { + continue; + } + scalar_t U00 = offset_bottom_data[bottom_index_base + y0 * width + x0]; + scalar_t U01 = offset_bottom_data[bottom_index_base + y1 * width + x0]; + scalar_t U10 = offset_bottom_data[bottom_index_base + y0 * width + x1]; + scalar_t U11 = offset_bottom_data[bottom_index_base + y1 * width + x1]; + scalar_t diff_x = (U11 * dist_y + U10 * (1 - dist_y) - U01 * dist_y - U00 * (1 - dist_y)) * trans_std * diff_val; + diff_x *= roi_width; + scalar_t diff_y = (U11 * dist_x + U01 * (1 - dist_x) - U10 * dist_x - U00 * (1 - dist_x)) * trans_std * diff_val; + diff_y *= roi_height; + + atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2) * part_size + 
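Both the forward and backward kernels place each pooled bin with a learned offset before sampling: the raw offset read from the `trans` map is scaled by `trans_std` and by the RoI size, then added to the bin's top-left corner. A small NumPy sketch of that bin-placement step is given below; the function and argument names are illustrative, not taken from the patch.

```python
import numpy as np

def deformable_bin_start(roi, ph, pw, pooled_size, off_x, off_y, trans_std):
    """(hstart, wstart) of pooled bin (ph, pw) after the learned offset,
    mirroring the arithmetic in the CUDA kernels."""
    x1, y1, x2, y2 = roi
    roi_w = max(x2 - x1, 0.1)            # avoid zero-sized RoIs, as in the kernel
    roi_h = max(y2 - y1, 0.1)
    bin_w = roi_w / pooled_size
    bin_h = roi_h / pooled_size
    wstart = pw * bin_w + x1 + off_x * trans_std * roi_w
    hstart = ph * bin_h + y1 + off_y * trans_std * roi_h
    return hstart, wstart

# A 7x7 grid over a 56x56 RoI; the offset shifts bin (3, 4) by a fraction of the RoI size.
print(deformable_bin_start((0., 0., 56., 56.), ph=3, pw=4, pooled_size=7,
                           off_x=0.2, off_y=-0.1, trans_std=0.1))
```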
part_h) * part_size + part_w, diff_x); + atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w, diff_y); + } + } + } +} + +void DeformablePSROIPoolForward(const at::Tensor data, + const at::Tensor bbox, + const at::Tensor trans, + at::Tensor out, + at::Tensor top_count, + const int batch, + const int channels, + const int height, + const int width, + const int num_bbox, + const int channels_trans, + const int no_trans, + const float spatial_scale, + const int output_dim, + const int group_size, + const int pooled_size, + const int part_size, + const int sample_per_part, + const float trans_std) +{ + const int pooled_height = pooled_size; + const int pooled_width = pooled_size; + const int count = num_bbox * output_dim * pooled_height * pooled_width; + const int num_classes = no_trans ? 1 : channels_trans / 2; + const int channels_each_class = no_trans ? output_dim : output_dim / num_classes; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data.type(), "deformable_psroi_pool_forward", ([&] { + const scalar_t *bottom_data = data.data(); + const scalar_t *bottom_rois = bbox.data(); + const scalar_t *bottom_trans = no_trans ? NULL : trans.data(); + scalar_t *top_data = out.data(); + scalar_t *top_count_data = top_count.data(); + + DeformablePSROIPoolForwardKernel<<>>( + count, bottom_data, (scalar_t)spatial_scale, channels, height, width, pooled_height, pooled_width, + bottom_rois, bottom_trans, no_trans, (scalar_t)trans_std, sample_per_part, output_dim, + group_size, part_size, num_classes, channels_each_class, top_data, top_count_data); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in DeformablePSROIPoolForward: %s\n", cudaGetErrorString(err)); + } +} + +void DeformablePSROIPoolBackwardAcc(const at::Tensor out_grad, + const at::Tensor data, + const at::Tensor bbox, + const at::Tensor trans, + const at::Tensor top_count, + at::Tensor in_grad, + at::Tensor trans_grad, + const int batch, + const int channels, + const int height, + const int width, + const int num_bbox, + const int channels_trans, + const int no_trans, + const float spatial_scale, + const int output_dim, + const int group_size, + const int pooled_size, + const int part_size, + const int sample_per_part, + const float trans_std) +{ + // LOG(INFO) << "DeformablePSROIPoolBackward"; + const int num_rois = num_bbox; + const int pooled_height = pooled_size; + const int pooled_width = pooled_size; + const int count = num_bbox * output_dim * pooled_height * pooled_width; + const int num_classes = no_trans ? 1 : channels_trans / 2; + const int channels_each_class = no_trans ? output_dim : output_dim / num_classes; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + out_grad.type(), "deformable_psroi_pool_backward_acc", ([&] { + const scalar_t *top_diff = out_grad.data(); + const scalar_t *bottom_data = data.data(); + const scalar_t *bottom_rois = bbox.data(); + const scalar_t *bottom_trans = no_trans ? NULL : trans.data(); + scalar_t *bottom_data_diff = in_grad.data(); + scalar_t *bottom_trans_diff = no_trans ? 
NULL : trans_grad.data(); + const scalar_t *top_count_data = top_count.data(); + + DeformablePSROIPoolBackwardAccKernel<<>>( + count, top_diff, top_count_data, num_rois, (scalar_t)spatial_scale, channels, height, width, + pooled_height, pooled_width, output_dim, bottom_data_diff, bottom_trans_diff, + bottom_data, bottom_rois, bottom_trans, no_trans, (scalar_t)trans_std, sample_per_part, + group_size, part_size, num_classes, channels_each_class); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in DeformablePSROIPoolForward: %s\n", cudaGetErrorString(err)); + } +} \ No newline at end of file diff --git a/mmdet/ops/nms/__init__.py b/mmdet/ops/nms/__init__.py new file mode 100644 index 0000000..c440704 --- /dev/null +++ b/mmdet/ops/nms/__init__.py @@ -0,0 +1,3 @@ +from .nms_wrapper import nms, soft_nms + +__all__ = ['nms', 'soft_nms'] diff --git a/mmdet/ops/nms/nms_wrapper.py b/mmdet/ops/nms/nms_wrapper.py new file mode 100644 index 0000000..8ce5bc4 --- /dev/null +++ b/mmdet/ops/nms/nms_wrapper.py @@ -0,0 +1,78 @@ +import numpy as np +import torch + +from . import nms_cuda, nms_cpu +from .soft_nms_cpu import soft_nms_cpu + + +def nms(dets, iou_thr, device_id=None): + """Dispatch to either CPU or GPU NMS implementations. + + The input can be either a torch tensor or numpy array. GPU NMS will be used + if the input is a gpu tensor or device_id is specified, otherwise CPU NMS + will be used. The returned type will always be the same as inputs. + + Arguments: + dets (torch.Tensor or np.ndarray): bboxes with scores. + iou_thr (float): IoU threshold for NMS. + device_id (int, optional): when `dets` is a numpy array, if `device_id` + is None, then cpu nms is used, otherwise gpu_nms will be used. + + Returns: + tuple: kept bboxes and indice, which is always the same data type as + the input. 
+ """ + # convert dets (tensor or numpy array) to tensor + if isinstance(dets, torch.Tensor): + is_numpy = False + dets_th = dets + elif isinstance(dets, np.ndarray): + is_numpy = True + device = 'cpu' if device_id is None else 'cuda:{}'.format(device_id) + dets_th = torch.from_numpy(dets).to(device) + else: + raise TypeError( + 'dets must be either a Tensor or numpy array, but got {}'.format( + type(dets))) + + # execute cpu or cuda nms + if dets_th.shape[0] == 0: + inds = dets_th.new_zeros(0, dtype=torch.long) + else: + if dets_th.is_cuda: + inds = nms_cuda.nms(dets_th, iou_thr) + else: + inds = nms_cpu.nms(dets_th, iou_thr) + + if is_numpy: + inds = inds.cpu().numpy() + return dets[inds, :], inds + + +def soft_nms(dets, iou_thr, method='linear', sigma=0.5, min_score=1e-3): + if isinstance(dets, torch.Tensor): + is_tensor = True + dets_np = dets.detach().cpu().numpy() + elif isinstance(dets, np.ndarray): + is_tensor = False + dets_np = dets + else: + raise TypeError( + 'dets must be either a Tensor or numpy array, but got {}'.format( + type(dets))) + + method_codes = {'linear': 1, 'gaussian': 2} + if method not in method_codes: + raise ValueError('Invalid method for SoftNMS: {}'.format(method)) + new_dets, inds = soft_nms_cpu( + dets_np, + iou_thr, + method=method_codes[method], + sigma=sigma, + min_score=min_score) + + if is_tensor: + return dets.new_tensor(new_dets), dets.new_tensor( + inds, dtype=torch.long) + else: + return new_dets.astype(np.float32), inds.astype(np.int64) diff --git a/mmdet/ops/nms/setup.py b/mmdet/ops/nms/setup.py new file mode 100644 index 0000000..28f3b4e --- /dev/null +++ b/mmdet/ops/nms/setup.py @@ -0,0 +1,84 @@ +import os.path as osp +from setuptools import setup, Extension + +import numpy as np +from Cython.Build import cythonize +from Cython.Distutils import build_ext +from torch.utils.cpp_extension import BuildExtension, CUDAExtension + +ext_args = dict( + include_dirs=[np.get_include()], + language='c++', + extra_compile_args={ + 'cc': ['-Wno-unused-function', '-Wno-write-strings'], + 'nvcc': ['-c', '--compiler-options', '-fPIC'], + }, +) + +extensions = [ + Extension('soft_nms_cpu', ['src/soft_nms_cpu.pyx'], **ext_args), +] + + +def customize_compiler_for_nvcc(self): + """inject deep into distutils to customize how the dispatch + to cc/nvcc works. + If you subclass UnixCCompiler, it's not trivial to get your subclass + injected in, and still have the right customizations (i.e. + distutils.sysconfig.customize_compiler) run on it. So instead of going + the OO route, I have this. Note, it's kindof like a wierd functional + subclassing going on.""" + + # tell the compiler it can processes .cu + self.src_extensions.append('.cu') + + # save references to the default compiler_so and _comple methods + default_compiler_so = self.compiler_so + super = self._compile + + # now redefine the _compile method. This gets executed for each + # object but distutils doesn't have the ability to change compilers + # based on source extension: we add it. 
+ def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts): + if osp.splitext(src)[1] == '.cu': + # use the cuda for .cu files + self.set_executable('compiler_so', 'nvcc') + # use only a subset of the extra_postargs, which are 1-1 translated + # from the extra_compile_args in the Extension class + postargs = extra_postargs['nvcc'] + else: + postargs = extra_postargs['cc'] + + super(obj, src, ext, cc_args, postargs, pp_opts) + # reset the default compiler_so, which we might have changed for cuda + self.compiler_so = default_compiler_so + + # inject our redefined _compile method into the class + self._compile = _compile + + +class custom_build_ext(build_ext): + + def build_extensions(self): + customize_compiler_for_nvcc(self.compiler) + build_ext.build_extensions(self) + + +setup( + name='soft_nms', + cmdclass={'build_ext': custom_build_ext}, + ext_modules=cythonize(extensions), +) + +setup( + name='nms_cuda', + ext_modules=[ + CUDAExtension('nms_cuda', [ + 'src/nms_cuda.cpp', + 'src/nms_kernel.cu', + ]), + CUDAExtension('nms_cpu', [ + 'src/nms_cpu.cpp', + ]), + ], + cmdclass={'build_ext': BuildExtension}) diff --git a/mmdet/ops/nms/src/nms_cpu.cpp b/mmdet/ops/nms/src/nms_cpu.cpp new file mode 100644 index 0000000..65546ef --- /dev/null +++ b/mmdet/ops/nms/src/nms_cpu.cpp @@ -0,0 +1,71 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +#include + +template +at::Tensor nms_cpu_kernel(const at::Tensor& dets, const float threshold) { + AT_ASSERTM(!dets.type().is_cuda(), "dets must be a CPU tensor"); + + if (dets.numel() == 0) { + return at::empty({0}, dets.options().dtype(at::kLong).device(at::kCPU)); + } + + auto x1_t = dets.select(1, 0).contiguous(); + auto y1_t = dets.select(1, 1).contiguous(); + auto x2_t = dets.select(1, 2).contiguous(); + auto y2_t = dets.select(1, 3).contiguous(); + auto scores = dets.select(1, 4).contiguous(); + + at::Tensor areas_t = (x2_t - x1_t + 1) * (y2_t - y1_t + 1); + + auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); + + auto ndets = dets.size(0); + at::Tensor suppressed_t = + at::zeros({ndets}, dets.options().dtype(at::kByte).device(at::kCPU)); + + auto suppressed = suppressed_t.data(); + auto order = order_t.data(); + auto x1 = x1_t.data(); + auto y1 = y1_t.data(); + auto x2 = x2_t.data(); + auto y2 = y2_t.data(); + auto areas = areas_t.data(); + + for (int64_t _i = 0; _i < ndets; _i++) { + auto i = order[_i]; + if (suppressed[i] == 1) continue; + auto ix1 = x1[i]; + auto iy1 = y1[i]; + auto ix2 = x2[i]; + auto iy2 = y2[i]; + auto iarea = areas[i]; + + for (int64_t _j = _i + 1; _j < ndets; _j++) { + auto j = order[_j]; + if (suppressed[j] == 1) continue; + auto xx1 = std::max(ix1, x1[j]); + auto yy1 = std::max(iy1, y1[j]); + auto xx2 = std::min(ix2, x2[j]); + auto yy2 = std::min(iy2, y2[j]); + + auto w = std::max(static_cast(0), xx2 - xx1 + 1); + auto h = std::max(static_cast(0), yy2 - yy1 + 1); + auto inter = w * h; + auto ovr = inter / (iarea + areas[j] - inter); + if (ovr >= threshold) suppressed[j] = 1; + } + } + return at::nonzero(suppressed_t == 0).squeeze(1); +} + +at::Tensor nms(const at::Tensor& dets, const float threshold) { + at::Tensor result; + AT_DISPATCH_FLOATING_TYPES(dets.type(), "nms", [&] { + result = nms_cpu_kernel(dets, threshold); + }); + return result; +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("nms", &nms, "non-maximum suppression"); +} \ No newline at end of file diff --git a/mmdet/ops/nms/src/nms_cuda.cpp b/mmdet/ops/nms/src/nms_cuda.cpp new file mode 100644 index 
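For reference, the greedy procedure implemented by `nms_cpu_kernel` can be restated in a few lines of NumPy. This sketch is not part of the patch; it uses the same `+1` box-area convention and, like the kernel, suppresses a box when its overlap reaches the threshold:

```python
import numpy as np

def nms_numpy(dets, thresh):
    """Pure-NumPy greedy NMS mirroring nms_cpu_kernel."""
    x1, y1, x2, y2, scores = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3], dets[:, 4]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]        # visit boxes in descending score order
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(int(i))
        # Intersection of the current best box with all remaining boxes.
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        # Keep only boxes strictly below the overlap threshold for the next round.
        order = order[1:][iou < thresh]
    return keep
```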
0000000..0ea6f9b --- /dev/null +++ b/mmdet/ops/nms/src/nms_cuda.cpp @@ -0,0 +1,17 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +#include + +#define CHECK_CUDA(x) AT_CHECK(x.type().is_cuda(), #x, " must be a CUDAtensor ") + +at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh); + +at::Tensor nms(const at::Tensor& dets, const float threshold) { + CHECK_CUDA(dets); + if (dets.numel() == 0) + return at::empty({0}, dets.options().dtype(at::kLong).device(at::kCPU)); + return nms_cuda(dets, threshold); +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("nms", &nms, "non-maximum suppression"); +} \ No newline at end of file diff --git a/mmdet/ops/nms/src/nms_kernel.cu b/mmdet/ops/nms/src/nms_kernel.cu new file mode 100644 index 0000000..9254f2a --- /dev/null +++ b/mmdet/ops/nms/src/nms_kernel.cu @@ -0,0 +1,131 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +#include +#include + +#include +#include + +#include +#include + +int const threadsPerBlock = sizeof(unsigned long long) * 8; + +__device__ inline float devIoU(float const * const a, float const * const b) { + float left = max(a[0], b[0]), right = min(a[2], b[2]); + float top = max(a[1], b[1]), bottom = min(a[3], b[3]); + float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); + float interS = width * height; + float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); + float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); + return interS / (Sa + Sb - interS); +} + +__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh, + const float *dev_boxes, unsigned long long *dev_mask) { + const int row_start = blockIdx.y; + const int col_start = blockIdx.x; + + // if (row_start > col_start) return; + + const int row_size = + min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); + const int col_size = + min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); + + __shared__ float block_boxes[threadsPerBlock * 5]; + if (threadIdx.x < col_size) { + block_boxes[threadIdx.x * 5 + 0] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; + block_boxes[threadIdx.x * 5 + 1] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; + block_boxes[threadIdx.x * 5 + 2] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; + block_boxes[threadIdx.x * 5 + 3] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; + block_boxes[threadIdx.x * 5 + 4] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; + } + __syncthreads(); + + if (threadIdx.x < row_size) { + const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; + const float *cur_box = dev_boxes + cur_box_idx * 5; + int i = 0; + unsigned long long t = 0; + int start = 0; + if (row_start == col_start) { + start = threadIdx.x + 1; + } + for (i = start; i < col_size; i++) { + if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { + t |= 1ULL << i; + } + } + const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock); + dev_mask[cur_box_idx * col_blocks + col_start] = t; + } +} + +// boxes is a N x 5 tensor +at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh) { + using scalar_t = float; + AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor"); + auto scores = boxes.select(1, 4); + auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); + auto boxes_sorted = boxes.index_select(0, order_t); + + int boxes_num = boxes.size(0); + + const int col_blocks = THCCeilDiv(boxes_num, 
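`nms_kernel` works on 64-box tiles: each thread compares its (score-sorted) box against one column tile and records suppressions as bits of an `unsigned long long`, and the host loop in `nms_cuda` below then folds those per-tile masks into the final keep list. A pure-Python sketch of that host-side folding step (names are illustrative):

```python
def fold_nms_masks(mask, n_boxes, threads_per_block=64):
    """Combine per-tile suppression bitmasks into a keep list.

    mask[i][b] is a 64-bit integer whose bit k is set when box i (in
    score-sorted order) suppresses box b * threads_per_block + k.
    """
    col_blocks = (n_boxes + threads_per_block - 1) // threads_per_block
    removed = [0] * col_blocks                 # accumulated suppression bits
    keep = []
    for i in range(n_boxes):
        block, bit = divmod(i, threads_per_block)
        if not (removed[block] >> bit) & 1:    # box i has not been suppressed yet
            keep.append(i)
            for j in range(block, col_blocks):
                removed[j] |= mask[i][j]       # mark everything box i suppresses
    return keep

# Three boxes in one tile: box 0 suppresses box 1, so only 0 and 2 survive.
print(fold_nms_masks([[0b010], [0b000], [0b000]], n_boxes=3))  # [0, 2]
```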
threadsPerBlock); + + scalar_t* boxes_dev = boxes_sorted.data(); + + THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState + + unsigned long long* mask_dev = NULL; + //THCudaCheck(THCudaMalloc(state, (void**) &mask_dev, + // boxes_num * col_blocks * sizeof(unsigned long long))); + + mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long)); + + dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock), + THCCeilDiv(boxes_num, threadsPerBlock)); + dim3 threads(threadsPerBlock); + nms_kernel<<>>(boxes_num, + nms_overlap_thresh, + boxes_dev, + mask_dev); + + std::vector mask_host(boxes_num * col_blocks); + THCudaCheck(cudaMemcpy(&mask_host[0], + mask_dev, + sizeof(unsigned long long) * boxes_num * col_blocks, + cudaMemcpyDeviceToHost)); + + std::vector remv(col_blocks); + memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); + + at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU)); + int64_t* keep_out = keep.data(); + + int num_to_keep = 0; + for (int i = 0; i < boxes_num; i++) { + int nblock = i / threadsPerBlock; + int inblock = i % threadsPerBlock; + + if (!(remv[nblock] & (1ULL << inblock))) { + keep_out[num_to_keep++] = i; + unsigned long long *p = &mask_host[0] + i * col_blocks; + for (int j = nblock; j < col_blocks; j++) { + remv[j] |= p[j]; + } + } + } + + THCudaFree(state, mask_dev); + // TODO improve this part + return std::get<0>(order_t.index({ + keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to( + order_t.device(), keep.scalar_type()) + }).sort(0, false)); +} \ No newline at end of file diff --git a/mmdet/ops/nms/src/soft_nms_cpu.pyx b/mmdet/ops/nms/src/soft_nms_cpu.pyx new file mode 100644 index 0000000..c35f8f1 --- /dev/null +++ b/mmdet/ops/nms/src/soft_nms_cpu.pyx @@ -0,0 +1,127 @@ +# ---------------------------------------------------------- +# Soft-NMS: Improving Object Detection With One Line of Code +# Copyright (c) University of Maryland, College Park +# Licensed under The MIT License [see LICENSE for details] +# Written by Navaneeth Bodla and Bharat Singh +# Modified by Kai Chen +# ---------------------------------------------------------- + +# cython: language_level=3, boundscheck=False + +import numpy as np +cimport numpy as np + + +cdef inline np.float32_t max(np.float32_t a, np.float32_t b): + return a if a >= b else b + +cdef inline np.float32_t min(np.float32_t a, np.float32_t b): + return a if a <= b else b + + +def soft_nms_cpu( + np.ndarray[float, ndim=2] boxes_in, + float iou_thr, + unsigned int method=1, + float sigma=0.5, + float min_score=0.001, +): + boxes = boxes_in.copy() + cdef unsigned int N = boxes.shape[0] + cdef float iw, ih, box_area + cdef float ua + cdef int pos = 0 + cdef float maxscore = 0 + cdef int maxpos = 0 + cdef float x1, x2, y1, y2, tx1, tx2, ty1, ty2, ts, area, weight, ov + inds = np.arange(N) + + for i in range(N): + maxscore = boxes[i, 4] + maxpos = i + + tx1 = boxes[i, 0] + ty1 = boxes[i, 1] + tx2 = boxes[i, 2] + ty2 = boxes[i, 3] + ts = boxes[i, 4] + ti = inds[i] + + pos = i + 1 + # get max box + while pos < N: + if maxscore < boxes[pos, 4]: + maxscore = boxes[pos, 4] + maxpos = pos + pos = pos + 1 + + # add max box as a detection + boxes[i, 0] = boxes[maxpos, 0] + boxes[i, 1] = boxes[maxpos, 1] + boxes[i, 2] = boxes[maxpos, 2] + boxes[i, 3] = boxes[maxpos, 3] + boxes[i, 4] = boxes[maxpos, 4] + inds[i] = inds[maxpos] + + # swap ith box with position of max box + boxes[maxpos, 0] = tx1 + boxes[maxpos, 
1] = ty1 + boxes[maxpos, 2] = tx2 + boxes[maxpos, 3] = ty2 + boxes[maxpos, 4] = ts + inds[maxpos] = ti + + tx1 = boxes[i, 0] + ty1 = boxes[i, 1] + tx2 = boxes[i, 2] + ty2 = boxes[i, 3] + ts = boxes[i, 4] + + pos = i + 1 + # NMS iterations, note that N changes if detection boxes fall below + # threshold + while pos < N: + x1 = boxes[pos, 0] + y1 = boxes[pos, 1] + x2 = boxes[pos, 2] + y2 = boxes[pos, 3] + s = boxes[pos, 4] + + area = (x2 - x1 + 1) * (y2 - y1 + 1) + iw = (min(tx2, x2) - max(tx1, x1) + 1) + if iw > 0: + ih = (min(ty2, y2) - max(ty1, y1) + 1) + if ih > 0: + ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih) + ov = iw * ih / ua # iou between max box and detection box + + if method == 1: # linear + if ov > iou_thr: + weight = 1 - ov + else: + weight = 1 + elif method == 2: # gaussian + weight = np.exp(-(ov * ov) / sigma) + else: # original NMS + if ov > iou_thr: + weight = 0 + else: + weight = 1 + + boxes[pos, 4] = weight * boxes[pos, 4] + + # if box score falls below threshold, discard the box by + # swapping with last box update N + if boxes[pos, 4] < min_score: + boxes[pos, 0] = boxes[N-1, 0] + boxes[pos, 1] = boxes[N-1, 1] + boxes[pos, 2] = boxes[N-1, 2] + boxes[pos, 3] = boxes[N-1, 3] + boxes[pos, 4] = boxes[N-1, 4] + inds[pos] = inds[N - 1] + N = N - 1 + pos = pos - 1 + + pos = pos + 1 + + return boxes[:N], inds[:N] diff --git a/mmdet/ops/roi_align/__init__.py b/mmdet/ops/roi_align/__init__.py new file mode 100644 index 0000000..4cb0379 --- /dev/null +++ b/mmdet/ops/roi_align/__init__.py @@ -0,0 +1,4 @@ +from .functions.roi_align import roi_align +from .modules.roi_align import RoIAlign + +__all__ = ['roi_align', 'RoIAlign'] diff --git a/mmdet/ops/roi_align/functions/__init__.py b/mmdet/ops/roi_align/functions/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/mmdet/ops/roi_align/functions/roi_align.py b/mmdet/ops/roi_align/functions/roi_align.py new file mode 100644 index 0000000..096badd --- /dev/null +++ b/mmdet/ops/roi_align/functions/roi_align.py @@ -0,0 +1,61 @@ +from torch.autograd import Function + +from .. 
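The three rescoring rules in `soft_nms_cpu` differ only in the weight applied to an overlapping box's score; the Cython version above additionally reorders boxes and prunes those that fall below `min_score`. A compact, illustrative restatement of just the weight rule in plain Python:

```python
import numpy as np

def soft_nms_weight(iou, iou_thr, method='linear', sigma=0.5):
    """Score multiplier for a box with overlap `iou` against the current top box."""
    if method == 'linear':                   # decay linearly once past the threshold
        return 1.0 - iou if iou > iou_thr else 1.0
    elif method == 'gaussian':               # smooth decay for any amount of overlap
        return float(np.exp(-(iou * iou) / sigma))
    else:                                    # plain NMS: hard suppression
        return 0.0 if iou > iou_thr else 1.0

for iou in (0.2, 0.6, 0.9):
    print(iou, soft_nms_weight(iou, 0.5, 'linear'), soft_nms_weight(iou, 0.5, 'gaussian'))
```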
import roi_align_cuda + + +class RoIAlignFunction(Function): + + @staticmethod + def forward(ctx, features, rois, out_size, spatial_scale, sample_num=0): + if isinstance(out_size, int): + out_h = out_size + out_w = out_size + elif isinstance(out_size, tuple): + assert len(out_size) == 2 + assert isinstance(out_size[0], int) + assert isinstance(out_size[1], int) + out_h, out_w = out_size + else: + raise TypeError( + '"out_size" must be an integer or tuple of integers') + ctx.spatial_scale = spatial_scale + ctx.sample_num = sample_num + ctx.save_for_backward(rois) + ctx.feature_size = features.size() + + batch_size, num_channels, data_height, data_width = features.size() + num_rois = rois.size(0) + + output = features.new_zeros(num_rois, num_channels, out_h, out_w) + if features.is_cuda: + roi_align_cuda.forward(features, rois, out_h, out_w, spatial_scale, + sample_num, output) + else: + raise NotImplementedError + + return output + + @staticmethod + def backward(ctx, grad_output): + feature_size = ctx.feature_size + spatial_scale = ctx.spatial_scale + sample_num = ctx.sample_num + rois = ctx.saved_tensors[0] + assert (feature_size is not None and grad_output.is_cuda) + + batch_size, num_channels, data_height, data_width = feature_size + out_w = grad_output.size(3) + out_h = grad_output.size(2) + + grad_input = grad_rois = None + if ctx.needs_input_grad[0]: + grad_input = rois.new_zeros(batch_size, num_channels, data_height, + data_width) + roi_align_cuda.backward(grad_output.contiguous(), rois, out_h, + out_w, spatial_scale, sample_num, + grad_input) + + return grad_input, grad_rois, None, None, None + + +roi_align = RoIAlignFunction.apply diff --git a/mmdet/ops/roi_align/gradcheck.py b/mmdet/ops/roi_align/gradcheck.py new file mode 100644 index 0000000..394cd69 --- /dev/null +++ b/mmdet/ops/roi_align/gradcheck.py @@ -0,0 +1,29 @@ +import numpy as np +import torch +from torch.autograd import gradcheck + +import os.path as osp +import sys +sys.path.append(osp.abspath(osp.join(__file__, '../../'))) +from roi_align import RoIAlign # noqa: E402 + +feat_size = 15 +spatial_scale = 1.0 / 8 +img_size = feat_size / spatial_scale +num_imgs = 2 +num_rois = 20 + +batch_ind = np.random.randint(num_imgs, size=(num_rois, 1)) +rois = np.random.rand(num_rois, 4) * img_size * 0.5 +rois[:, 2:] += img_size * 0.5 +rois = np.hstack((batch_ind, rois)) + +feat = torch.randn( + num_imgs, 16, feat_size, feat_size, requires_grad=True, device='cuda:0') +rois = torch.from_numpy(rois).float().cuda() +inputs = (feat, rois) +print('Gradcheck for roi align...') +test = gradcheck(RoIAlign(3, spatial_scale), inputs, atol=1e-3, eps=1e-3) +print(test) +test = gradcheck(RoIAlign(3, spatial_scale, 2), inputs, atol=1e-3, eps=1e-3) +print(test) diff --git a/mmdet/ops/roi_align/modules/__init__.py b/mmdet/ops/roi_align/modules/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/mmdet/ops/roi_align/modules/roi_align.py b/mmdet/ops/roi_align/modules/roi_align.py new file mode 100644 index 0000000..b83b74e --- /dev/null +++ b/mmdet/ops/roi_align/modules/roi_align.py @@ -0,0 +1,16 @@ +from torch.nn.modules.module import Module +from ..functions.roi_align import RoIAlignFunction + + +class RoIAlign(Module): + + def __init__(self, out_size, spatial_scale, sample_num=0): + super(RoIAlign, self).__init__() + + self.out_size = out_size + self.spatial_scale = float(spatial_scale) + self.sample_num = int(sample_num) + + def forward(self, features, rois): + return RoIAlignFunction.apply(features, rois, self.out_size, + 
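Assuming `roi_align_cuda` has been built, the functional and module forms are interchangeable; RoIs are `[batch_ind, x1, y1, x2, y2]` rows in image coordinates and are mapped onto the feature map by `spatial_scale`, the same convention the gradcheck script above uses. A minimal usage sketch (tensor shapes are illustrative):

```python
import torch
from mmdet.ops.roi_align import RoIAlign, roi_align

feat = torch.randn(2, 256, 50, 76, device='cuda')        # one FPN-level feature map
rois = torch.tensor([[0.,  20.,  30., 180., 220.],       # [batch_ind, x1, y1, x2, y2]
                     [1., 100.,  40., 300., 260.]], device='cuda')

align = RoIAlign(out_size=7, spatial_scale=1. / 8, sample_num=2)
pooled = align(feat, rois)                                # -> (2, 256, 7, 7)

# Equivalent functional call.
pooled_fn = roi_align(feat, rois, 7, 1. / 8, 2)
print(pooled.shape, torch.allclose(pooled, pooled_fn))
```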
self.spatial_scale, self.sample_num) diff --git a/mmdet/ops/roi_align/setup.py b/mmdet/ops/roi_align/setup.py new file mode 100644 index 0000000..f02a5ea --- /dev/null +++ b/mmdet/ops/roi_align/setup.py @@ -0,0 +1,12 @@ +from setuptools import setup +from torch.utils.cpp_extension import BuildExtension, CUDAExtension + +setup( + name='roi_align_cuda', + ext_modules=[ + CUDAExtension('roi_align_cuda', [ + 'src/roi_align_cuda.cpp', + 'src/roi_align_kernel.cu', + ]), + ], + cmdclass={'build_ext': BuildExtension}) diff --git a/mmdet/ops/roi_align/src/roi_align_cuda.cpp b/mmdet/ops/roi_align/src/roi_align_cuda.cpp new file mode 100644 index 0000000..06a73aa --- /dev/null +++ b/mmdet/ops/roi_align/src/roi_align_cuda.cpp @@ -0,0 +1,85 @@ +#include + +#include +#include + +int ROIAlignForwardLaucher(const at::Tensor features, const at::Tensor rois, + const float spatial_scale, const int sample_num, + const int channels, const int height, + const int width, const int num_rois, + const int pooled_height, const int pooled_width, + at::Tensor output); + +int ROIAlignBackwardLaucher(const at::Tensor top_grad, const at::Tensor rois, + const float spatial_scale, const int sample_num, + const int channels, const int height, + const int width, const int num_rois, + const int pooled_height, const int pooled_width, + at::Tensor bottom_grad); + +#define CHECK_CUDA(x) AT_CHECK(x.type().is_cuda(), #x, " must be a CUDAtensor ") +#define CHECK_CONTIGUOUS(x) \ + AT_CHECK(x.is_contiguous(), #x, " must be contiguous ") +#define CHECK_INPUT(x) \ + CHECK_CUDA(x); \ + CHECK_CONTIGUOUS(x) + +int roi_align_forward_cuda(at::Tensor features, at::Tensor rois, + int pooled_height, int pooled_width, + float spatial_scale, int sample_num, + at::Tensor output) { + CHECK_INPUT(features); + CHECK_INPUT(rois); + CHECK_INPUT(output); + + // Number of ROIs + int num_rois = rois.size(0); + int size_rois = rois.size(1); + + if (size_rois != 5) { + printf("wrong roi size\n"); + return 0; + } + + int num_channels = features.size(1); + int data_height = features.size(2); + int data_width = features.size(3); + + ROIAlignForwardLaucher(features, rois, spatial_scale, sample_num, + num_channels, data_height, data_width, num_rois, + pooled_height, pooled_width, output); + + return 1; +} + +int roi_align_backward_cuda(at::Tensor top_grad, at::Tensor rois, + int pooled_height, int pooled_width, + float spatial_scale, int sample_num, + at::Tensor bottom_grad) { + CHECK_INPUT(top_grad); + CHECK_INPUT(rois); + CHECK_INPUT(bottom_grad); + + // Number of ROIs + int num_rois = rois.size(0); + int size_rois = rois.size(1); + if (size_rois != 5) { + printf("wrong roi size\n"); + return 0; + } + + int num_channels = bottom_grad.size(1); + int data_height = bottom_grad.size(2); + int data_width = bottom_grad.size(3); + + ROIAlignBackwardLaucher(top_grad, rois, spatial_scale, sample_num, + num_channels, data_height, data_width, num_rois, + pooled_height, pooled_width, bottom_grad); + + return 1; +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("forward", &roi_align_forward_cuda, "Roi_Align forward (CUDA)"); + m.def("backward", &roi_align_backward_cuda, "Roi_Align backward (CUDA)"); +} diff --git a/mmdet/ops/roi_align/src/roi_align_kernel.cu b/mmdet/ops/roi_align/src/roi_align_kernel.cu new file mode 100644 index 0000000..4655640 --- /dev/null +++ b/mmdet/ops/roi_align/src/roi_align_kernel.cu @@ -0,0 +1,294 @@ +#include +#include + +#define CUDA_1D_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ + i += blockDim.x 
* gridDim.x) + +#define THREADS_PER_BLOCK 1024 + +inline int GET_BLOCKS(const int N) { + int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; + int max_block_num = 65000; + return min(optimal_block_num, max_block_num); +} + +template +__device__ scalar_t bilinear_interpolate(const scalar_t *bottom_data, + const int height, const int width, + scalar_t y, scalar_t x) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + return 0; + } + + if (y <= 0) y = 0; + if (x <= 0) x = 0; + + int y_low = (int)y; + int x_low = (int)x; + int y_high; + int x_high; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (scalar_t)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (scalar_t)x_low; + } else { + x_high = x_low + 1; + } + + scalar_t ly = y - y_low; + scalar_t lx = x - x_low; + scalar_t hy = 1. - ly; + scalar_t hx = 1. - lx; + // do bilinear interpolation + scalar_t lt = bottom_data[y_low * width + x_low]; + scalar_t rt = bottom_data[y_low * width + x_high]; + scalar_t lb = bottom_data[y_high * width + x_low]; + scalar_t rb = bottom_data[y_high * width + x_high]; + scalar_t w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + scalar_t val = (w1 * lt + w2 * rt + w3 * lb + w4 * rb); + + return val; +} + +template +__global__ void ROIAlignForward(const int nthreads, const scalar_t *bottom_data, + const scalar_t *bottom_rois, + const scalar_t spatial_scale, + const int sample_num, const int channels, + const int height, const int width, + const int pooled_height, const int pooled_width, + scalar_t *top_data) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, ph, pw) is an element in the aligned output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const scalar_t *offset_bottom_rois = bottom_rois + n * 5; + int roi_batch_ind = offset_bottom_rois[0]; + scalar_t roi_start_w = offset_bottom_rois[1] * spatial_scale; + scalar_t roi_start_h = offset_bottom_rois[2] * spatial_scale; + scalar_t roi_end_w = (offset_bottom_rois[3] + 1) * spatial_scale; + scalar_t roi_end_h = (offset_bottom_rois[4] + 1) * spatial_scale; + + // Force malformed ROIs to be 1x1 + scalar_t roi_width = fmaxf((scalar_t)roi_end_w - roi_start_w, 0.); + scalar_t roi_height = fmaxf((scalar_t)roi_end_h - roi_start_h, 0.); + + scalar_t bin_size_h = roi_height / pooled_height; + scalar_t bin_size_w = roi_width / pooled_width; + + const scalar_t *offset_bottom_data = + bottom_data + (roi_batch_ind * channels + c) * height * width; + + int sample_num_h = (sample_num > 0) + ? sample_num + : ceil(roi_height / pooled_height); // e.g., = 2 + int sample_num_w = + (sample_num > 0) ? 
sample_num : ceil(roi_width / pooled_width); + + scalar_t h = (scalar_t)(ph + 0.5) * bin_size_h + roi_start_h; + scalar_t w = (scalar_t)(pw + 0.5) * bin_size_w + roi_start_w; + + int hstart = fminf(floor(h), height - 2); + int wstart = fminf(floor(w), width - 2); + + scalar_t output_val = 0; + for (int iy = 0; iy < sample_num_h; iy++) { + const scalar_t y = roi_start_h + ph * bin_size_h + + (scalar_t)(iy + scalar_t(.5f)) * bin_size_h / + (scalar_t)(sample_num_h); + for (int ix = 0; ix < sample_num_w; ix++) { + const scalar_t x = roi_start_w + pw * bin_size_w + + (scalar_t)(ix + scalar_t(.5f)) * bin_size_w / + (scalar_t)(sample_num_w); + scalar_t val = bilinear_interpolate(offset_bottom_data, + height, width, y, x); + output_val += val; + } + } + output_val /= (sample_num_h * sample_num_w); + top_data[index] = output_val; + } +} + +int ROIAlignForwardLaucher(const at::Tensor features, const at::Tensor rois, + const float spatial_scale, const int sample_num, + const int channels, const int height, + const int width, const int num_rois, + const int pooled_height, const int pooled_width, + at::Tensor output) { + const int output_size = num_rois * pooled_height * pooled_width * channels; + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + features.type(), "ROIAlignLaucherForward", ([&] { + const scalar_t *bottom_data = features.data(); + const scalar_t *rois_data = rois.data(); + scalar_t *top_data = output.data(); + + ROIAlignForward + <<>>( + output_size, bottom_data, rois_data, scalar_t(spatial_scale), + sample_num, channels, height, width, pooled_height, + pooled_width, top_data); + })); + THCudaCheck(cudaGetLastError()); + return 1; +} + +template +__device__ void bilinear_interpolate_gradient(const int height, const int width, + scalar_t y, scalar_t x, + scalar_t &w1, scalar_t &w2, + scalar_t &w3, scalar_t &w4, + int &x_low, int &x_high, + int &y_low, int &y_high) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + w1 = w2 = w3 = w4 = 0.; + x_low = x_high = y_low = y_high = -1; + return; + } + + if (y <= 0) y = 0; + if (x <= 0) x = 0; + + y_low = (int)y; + x_low = (int)x; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (scalar_t)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (scalar_t)x_low; + } else { + x_high = x_low + 1; + } + + scalar_t ly = y - y_low; + scalar_t lx = x - x_low; + scalar_t hy = 1. - ly; + scalar_t hx = 1. 
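Each output cell of `ROIAlignForward` averages `sample_num_h * sample_num_w` bilinear samples spread evenly inside its bin. The NumPy sketch below shows where those sample points land along one axis for a given bin; it only restates the index arithmetic of the kernel and is not part of the patch:

```python
import numpy as np

def roi_align_sample_points(roi_start, bin_size, bin_index, sample_num):
    """Centres of the sample points along one axis for bin `bin_index`."""
    # (i + 0.5) / sample_num spaces the samples evenly inside the bin.
    return np.array([roi_start + bin_index * bin_size +
                     (i + 0.5) * bin_size / sample_num
                     for i in range(sample_num)])

# A 7-bin RoI starting at x = 4.25 on the feature map, 2 samples per bin:
bin_size = 21.5 / 7
print(roi_align_sample_points(4.25, bin_size, bin_index=0, sample_num=2))
print(roi_align_sample_points(4.25, bin_size, bin_index=3, sample_num=2))
```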
- lx; + + w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + return; +} + +template +__global__ void ROIAlignBackward( + const int nthreads, const scalar_t *top_diff, const scalar_t *bottom_rois, + const scalar_t spatial_scale, const int sample_num, const int channels, + const int height, const int width, const int pooled_height, + const int pooled_width, scalar_t *bottom_diff) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, ph, pw) is an element in the aligned output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const scalar_t *offset_bottom_rois = bottom_rois + n * 5; + int roi_batch_ind = offset_bottom_rois[0]; + scalar_t roi_start_w = offset_bottom_rois[1] * spatial_scale; + scalar_t roi_start_h = offset_bottom_rois[2] * spatial_scale; + scalar_t roi_end_w = (offset_bottom_rois[3] + 1) * spatial_scale; + scalar_t roi_end_h = (offset_bottom_rois[4] + 1) * spatial_scale; + + // Force malformed ROIs to be 1x1 + scalar_t roi_width = fmaxf((scalar_t)roi_end_w - roi_start_w, 0.); + scalar_t roi_height = fmaxf((scalar_t)roi_end_h - roi_start_h, 0.); + + scalar_t bin_size_h = roi_height / pooled_height; + scalar_t bin_size_w = roi_width / pooled_width; + + scalar_t *offset_bottom_diff = + bottom_diff + (roi_batch_ind * channels + c) * height * width; + int offset_top = (n * channels + c) * pooled_height * pooled_width + + ph * pooled_width + pw; + scalar_t offset_top_diff = top_diff[offset_top]; + + int sample_num_h = (sample_num > 0) + ? sample_num + : ceil(roi_height / pooled_height); // e.g., = 2 + int sample_num_w = + (sample_num > 0) ? sample_num : ceil(roi_width / pooled_width); + + const scalar_t count = (scalar_t)(sample_num_h * sample_num_w); + + scalar_t h = (scalar_t)(ph + 0.5) * bin_size_h + roi_start_h; + scalar_t w = (scalar_t)(pw + 0.5) * bin_size_w + roi_start_w; + + int hstart = fminf(floor(h), height - 2); + int wstart = fminf(floor(w), width - 2); + + for (int iy = 0; iy < sample_num_h; iy++) { + const scalar_t y = + roi_start_h + ph * bin_size_h + + (scalar_t)(iy + .5f) * bin_size_h / (scalar_t)(sample_num_h); + for (int ix = 0; ix < sample_num_w; ix++) { + const scalar_t x = + roi_start_w + pw * bin_size_w + + (scalar_t)(ix + .5f) * bin_size_w / (scalar_t)(sample_num_w); + scalar_t w1, w2, w3, w4; + int x_low, x_high, y_low, y_high; + + bilinear_interpolate_gradient( + height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high); + scalar_t g1 = offset_top_diff * w1 / count; + scalar_t g2 = offset_top_diff * w2 / count; + scalar_t g3 = offset_top_diff * w3 / count; + scalar_t g4 = offset_top_diff * w4 / count; + if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { + atomicAdd(offset_bottom_diff + y_low * width + x_low, g1); + atomicAdd(offset_bottom_diff + y_low * width + x_high, g2); + atomicAdd(offset_bottom_diff + y_high * width + x_low, g3); + atomicAdd(offset_bottom_diff + y_high * width + x_high, g4); + } + } + } + } +} + +int ROIAlignBackwardLaucher(const at::Tensor top_grad, const at::Tensor rois, + const float spatial_scale, const int sample_num, + const int channels, const int height, + const int width, const int num_rois, + const int pooled_height, const int pooled_width, + at::Tensor bottom_grad) { + const int output_size = num_rois * pooled_height * pooled_width * channels; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + top_grad.type(), "ROIAlignLaucherBackward", ([&] { + 
const scalar_t *top_diff = top_grad.data(); + const scalar_t *rois_data = rois.data(); + scalar_t *bottom_diff = bottom_grad.data(); + if (sizeof(scalar_t) == sizeof(double)) { + fprintf(stderr, "double is not supported\n"); + exit(-1); + } + + ROIAlignBackward + <<>>( + output_size, top_diff, rois_data, spatial_scale, sample_num, + channels, height, width, pooled_height, pooled_width, + bottom_diff); + })); + THCudaCheck(cudaGetLastError()); + return 1; +} diff --git a/mmdet/ops/roi_pool/__init__.py b/mmdet/ops/roi_pool/__init__.py new file mode 100644 index 0000000..eb2c57e --- /dev/null +++ b/mmdet/ops/roi_pool/__init__.py @@ -0,0 +1,4 @@ +from .functions.roi_pool import roi_pool +from .modules.roi_pool import RoIPool + +__all__ = ['roi_pool', 'RoIPool'] diff --git a/mmdet/ops/roi_pool/functions/__init__.py b/mmdet/ops/roi_pool/functions/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/mmdet/ops/roi_pool/functions/roi_pool.py b/mmdet/ops/roi_pool/functions/roi_pool.py new file mode 100644 index 0000000..068da60 --- /dev/null +++ b/mmdet/ops/roi_pool/functions/roi_pool.py @@ -0,0 +1,55 @@ +import torch +from torch.autograd import Function + +from .. import roi_pool_cuda + + +class RoIPoolFunction(Function): + + @staticmethod + def forward(ctx, features, rois, out_size, spatial_scale): + if isinstance(out_size, int): + out_h = out_size + out_w = out_size + elif isinstance(out_size, tuple): + assert len(out_size) == 2 + assert isinstance(out_size[0], int) + assert isinstance(out_size[1], int) + out_h, out_w = out_size + else: + raise TypeError( + '"out_size" must be an integer or tuple of integers') + assert features.is_cuda + ctx.save_for_backward(rois) + num_channels = features.size(1) + num_rois = rois.size(0) + out_size = (num_rois, num_channels, out_h, out_w) + output = features.new_zeros(out_size) + argmax = features.new_zeros(out_size, dtype=torch.int) + roi_pool_cuda.forward(features, rois, out_h, out_w, spatial_scale, + output, argmax) + ctx.spatial_scale = spatial_scale + ctx.feature_size = features.size() + ctx.argmax = argmax + + return output + + @staticmethod + def backward(ctx, grad_output): + assert grad_output.is_cuda + spatial_scale = ctx.spatial_scale + feature_size = ctx.feature_size + argmax = ctx.argmax + rois = ctx.saved_tensors[0] + assert feature_size is not None + + grad_input = grad_rois = None + if ctx.needs_input_grad[0]: + grad_input = grad_output.new_zeros(feature_size) + roi_pool_cuda.backward(grad_output.contiguous(), rois, argmax, + spatial_scale, grad_input) + + return grad_input, grad_rois, None, None + + +roi_pool = RoIPoolFunction.apply diff --git a/mmdet/ops/roi_pool/gradcheck.py b/mmdet/ops/roi_pool/gradcheck.py new file mode 100644 index 0000000..c396160 --- /dev/null +++ b/mmdet/ops/roi_pool/gradcheck.py @@ -0,0 +1,15 @@ +import torch +from torch.autograd import gradcheck + +import os.path as osp +import sys +sys.path.append(osp.abspath(osp.join(__file__, '../../'))) +from roi_pool import RoIPool # noqa: E402 + +feat = torch.randn(4, 16, 15, 15, requires_grad=True).cuda() +rois = torch.Tensor([[0, 0, 0, 50, 50], [0, 10, 30, 43, 55], + [1, 67, 40, 110, 120]]).cuda() +inputs = (feat, rois) +print('Gradcheck for roi pooling...') +test = gradcheck(RoIPool(4, 1.0 / 8), inputs, eps=1e-5, atol=1e-3) +print(test) diff --git a/mmdet/ops/roi_pool/modules/__init__.py b/mmdet/ops/roi_pool/modules/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/mmdet/ops/roi_pool/modules/roi_pool.py 
b/mmdet/ops/roi_pool/modules/roi_pool.py new file mode 100644 index 0000000..d7fffd0 --- /dev/null +++ b/mmdet/ops/roi_pool/modules/roi_pool.py @@ -0,0 +1,14 @@ +from torch.nn.modules.module import Module +from ..functions.roi_pool import roi_pool + + +class RoIPool(Module): + + def __init__(self, out_size, spatial_scale): + super(RoIPool, self).__init__() + + self.out_size = out_size + self.spatial_scale = float(spatial_scale) + + def forward(self, features, rois): + return roi_pool(features, rois, self.out_size, self.spatial_scale) diff --git a/mmdet/ops/roi_pool/setup.py b/mmdet/ops/roi_pool/setup.py new file mode 100644 index 0000000..16991b8 --- /dev/null +++ b/mmdet/ops/roi_pool/setup.py @@ -0,0 +1,12 @@ +from setuptools import setup +from torch.utils.cpp_extension import BuildExtension, CUDAExtension + +setup( + name='roi_pool', + ext_modules=[ + CUDAExtension('roi_pool_cuda', [ + 'src/roi_pool_cuda.cpp', + 'src/roi_pool_kernel.cu', + ]) + ], + cmdclass={'build_ext': BuildExtension}) diff --git a/mmdet/ops/roi_pool/src/roi_pool_cuda.cpp b/mmdet/ops/roi_pool/src/roi_pool_cuda.cpp new file mode 100644 index 0000000..84c4e4b --- /dev/null +++ b/mmdet/ops/roi_pool/src/roi_pool_cuda.cpp @@ -0,0 +1,86 @@ +#include + +#include +#include + +int ROIPoolForwardLaucher(const at::Tensor features, const at::Tensor rois, + const float spatial_scale, const int channels, + const int height, const int width, const int num_rois, + const int pooled_h, const int pooled_w, + at::Tensor output, at::Tensor argmax); + +int ROIPoolBackwardLaucher(const at::Tensor top_grad, const at::Tensor rois, + const at::Tensor argmax, const float spatial_scale, + const int batch_size, const int channels, + const int height, const int width, + const int num_rois, const int pooled_h, + const int pooled_w, at::Tensor bottom_grad); + +#define CHECK_CUDA(x) AT_CHECK(x.type().is_cuda(), #x, " must be a CUDAtensor ") +#define CHECK_CONTIGUOUS(x) \ + AT_CHECK(x.is_contiguous(), #x, " must be contiguous ") +#define CHECK_INPUT(x) \ + CHECK_CUDA(x); \ + CHECK_CONTIGUOUS(x) + +int roi_pooling_forward_cuda(at::Tensor features, at::Tensor rois, + int pooled_height, int pooled_width, + float spatial_scale, at::Tensor output, + at::Tensor argmax) { + CHECK_INPUT(features); + CHECK_INPUT(rois); + CHECK_INPUT(output); + CHECK_INPUT(argmax); + + // Number of ROIs + int num_rois = rois.size(0); + int size_rois = rois.size(1); + + if (size_rois != 5) { + printf("wrong roi size\n"); + return 0; + } + + int channels = features.size(1); + int height = features.size(2); + int width = features.size(3); + + ROIPoolForwardLaucher(features, rois, spatial_scale, channels, height, width, + num_rois, pooled_height, pooled_width, output, argmax); + + return 1; +} + +int roi_pooling_backward_cuda(at::Tensor top_grad, at::Tensor rois, + at::Tensor argmax, float spatial_scale, + at::Tensor bottom_grad) { + CHECK_INPUT(top_grad); + CHECK_INPUT(rois); + CHECK_INPUT(argmax); + CHECK_INPUT(bottom_grad); + + int pooled_height = top_grad.size(2); + int pooled_width = top_grad.size(3); + int num_rois = rois.size(0); + int size_rois = rois.size(1); + + if (size_rois != 5) { + printf("wrong roi size\n"); + return 0; + } + int batch_size = bottom_grad.size(0); + int channels = bottom_grad.size(1); + int height = bottom_grad.size(2); + int width = bottom_grad.size(3); + + ROIPoolBackwardLaucher(top_grad, rois, argmax, spatial_scale, batch_size, + channels, height, width, num_rois, pooled_height, + pooled_width, bottom_grad); + + return 1; +} + 
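Assuming `roi_pool_cuda` has been built, `RoIPool` follows the same RoI convention as RoIAlign (`[batch_ind, x1, y1, x2, y2]` rows scaled by `spatial_scale`) but keeps the maximum activation in each bin and remembers its argmax for the backward pass. A minimal usage sketch in the spirit of the gradcheck script above (shapes are illustrative):

```python
import torch
from mmdet.ops.roi_pool import RoIPool, roi_pool

feat = torch.randn(2, 256, 50, 76, device='cuda', requires_grad=True)
rois = torch.tensor([[0.,  20.,  30., 180., 220.],     # [batch_ind, x1, y1, x2, y2]
                     [1., 100.,  40., 300., 260.]], device='cuda')

pool = RoIPool(out_size=7, spatial_scale=1. / 8)
pooled = pool(feat, rois)                              # -> (2, 256, 7, 7)
pooled.sum().backward()                                # gradients flow only to the argmax cells

# Equivalent functional call.
pooled_fn = roi_pool(feat, rois, 7, 1. / 8)
print(pooled.shape, torch.allclose(pooled, pooled_fn))
```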
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("forward", &roi_pooling_forward_cuda, "Roi_Pooling forward (CUDA)"); + m.def("backward", &roi_pooling_backward_cuda, "Roi_Pooling backward (CUDA)"); +} diff --git a/mmdet/ops/roi_pool/src/roi_pool_kernel.cu b/mmdet/ops/roi_pool/src/roi_pool_kernel.cu new file mode 100644 index 0000000..b51bb04 --- /dev/null +++ b/mmdet/ops/roi_pool/src/roi_pool_kernel.cu @@ -0,0 +1,156 @@ +#include +#include + +#define CUDA_1D_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ + i += blockDim.x * gridDim.x) + +#define THREADS_PER_BLOCK 1024 + +inline int GET_BLOCKS(const int N) { + int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; + int max_block_num = 65000; + return min(optimal_block_num, max_block_num); +} + +template +__global__ void ROIPoolForward(const int nthreads, const scalar_t *bottom_data, + const scalar_t *rois, + const scalar_t spatial_scale, const int channels, + const int height, const int width, + const int pooled_h, const int pooled_w, + scalar_t *top_data, int *argmax_data) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_w; + int ph = (index / pooled_w) % pooled_h; + int c = (index / pooled_w / pooled_h) % channels; + int n = index / pooled_w / pooled_h / channels; + + const scalar_t *offset_rois = rois + n * 5; + int roi_batch_ind = offset_rois[0]; + // calculate the roi region on feature maps + scalar_t roi_x1 = offset_rois[1] * spatial_scale; + scalar_t roi_y1 = offset_rois[2] * spatial_scale; + scalar_t roi_x2 = (offset_rois[3] + 1) * spatial_scale; + scalar_t roi_y2 = (offset_rois[4] + 1) * spatial_scale; + + // force malformed rois to be 1x1 + scalar_t roi_w = roi_x2 - roi_x1; + scalar_t roi_h = roi_y2 - roi_y1; + if (roi_w <= 0 || roi_h <= 0) continue; + + scalar_t bin_size_w = roi_w / static_cast(pooled_w); + scalar_t bin_size_h = roi_h / static_cast(pooled_h); + + // the corresponding bin region + int bin_x1 = floor(static_cast(pw) * bin_size_w + roi_x1); + int bin_y1 = floor(static_cast(ph) * bin_size_h + roi_y1); + int bin_x2 = ceil(static_cast(pw + 1) * bin_size_w + roi_x1); + int bin_y2 = ceil(static_cast(ph + 1) * bin_size_h + roi_y1); + + // add roi offsets and clip to input boundaries + bin_x1 = min(max(bin_x1, 0), width); + bin_y1 = min(max(bin_y1, 0), height); + bin_x2 = min(max(bin_x2, 0), width); + bin_y2 = min(max(bin_y2, 0), height); + bool is_empty = (bin_y2 <= bin_y1) || (bin_x2 <= bin_x1); + + // If nothing is pooled, argmax = -1 causes nothing to be backprop'd + int max_idx = -1; + bottom_data += (roi_batch_ind * channels + c) * height * width; + + // Define an empty pooling region to be zero + scalar_t max_val = is_empty ? 
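`ROIPoolForward` maps each pooled cell to an integer window on the feature map (floor/ceil of the fractional bin edges, clipped to the map) and records the flat index of the maximum so the backward kernel can route gradients with `atomicAdd`. The NumPy sketch below illustrates that per-bin step for a single-channel map; it assumes the RoI is already scaled to feature coordinates and is not part of the patch:

```python
import numpy as np

def roi_pool_bin(feature, roi_xyxy, ph, pw, pooled_h, pooled_w):
    """Max value and flat argmax of pooled bin (ph, pw), as in ROIPoolForward."""
    h, w = feature.shape
    x1, y1, x2, y2 = roi_xyxy                      # already in feature-map coordinates
    bin_w = (x2 - x1) / pooled_w
    bin_h = (y2 - y1) / pooled_h
    # Integer window for this bin, clipped to the feature map.
    bx1 = min(max(int(np.floor(pw * bin_w + x1)), 0), w)
    by1 = min(max(int(np.floor(ph * bin_h + y1)), 0), h)
    bx2 = min(max(int(np.ceil((pw + 1) * bin_w + x1)), 0), w)
    by2 = min(max(int(np.ceil((ph + 1) * bin_h + y1)), 0), h)
    if by2 <= by1 or bx2 <= bx1:                   # an empty bin pools to zero
        return 0.0, -1
    window = feature[by1:by2, bx1:bx2]
    iy, ix = np.unravel_index(np.argmax(window), window.shape)
    return float(window[iy, ix]), (by1 + iy) * w + (bx1 + ix)

feat = np.random.rand(50, 76).astype(np.float32)
print(roi_pool_bin(feat, (2.5, 3.75, 22.5, 27.5), ph=0, pw=0, pooled_h=7, pooled_w=7))
```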
static_cast(0) + : bottom_data[bin_y1 * width + bin_x1] - 1; + + for (int h = bin_y1; h < bin_y2; ++h) { + for (int w = bin_x1; w < bin_x2; ++w) { + int offset = h * width + w; + if (bottom_data[offset] > max_val) { + max_val = bottom_data[offset]; + max_idx = offset; + } + } + } + top_data[index] = max_val; + if (argmax_data != NULL) argmax_data[index] = max_idx; + } +} + +int ROIPoolForwardLaucher(const at::Tensor features, const at::Tensor rois, + const float spatial_scale, const int channels, + const int height, const int width, const int num_rois, + const int pooled_h, const int pooled_w, + at::Tensor output, at::Tensor argmax) { + const int output_size = num_rois * channels * pooled_h * pooled_w; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + features.type(), "ROIPoolLaucherForward", ([&] { + const scalar_t *bottom_data = features.data(); + const scalar_t *rois_data = rois.data(); + scalar_t *top_data = output.data(); + int *argmax_data = argmax.data(); + + ROIPoolForward + <<>>( + output_size, bottom_data, rois_data, scalar_t(spatial_scale), + channels, height, width, pooled_h, pooled_w, top_data, + argmax_data); + })); + THCudaCheck(cudaGetLastError()); + return 1; +} + +template +__global__ void ROIPoolBackward(const int nthreads, const scalar_t *top_diff, + const scalar_t *rois, const int *argmax_data, + const scalar_t spatial_scale, + const int channels, const int height, + const int width, const int pooled_h, + const int pooled_w, scalar_t *bottom_diff) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + int pw = index % pooled_w; + int ph = (index / pooled_w) % pooled_h; + int c = (index / pooled_w / pooled_h) % channels; + int n = index / pooled_w / pooled_h / channels; + + int roi_batch_ind = rois[n * 5]; + int bottom_index = argmax_data[(n * channels + c) * pooled_h * pooled_w + + ph * pooled_w + pw]; + + atomicAdd(bottom_diff + (roi_batch_ind * channels + c) * height * width + + bottom_index, + top_diff[index]); + } +} + +int ROIPoolBackwardLaucher(const at::Tensor top_grad, const at::Tensor rois, + const at::Tensor argmax, const float spatial_scale, + const int batch_size, const int channels, + const int height, const int width, + const int num_rois, const int pooled_h, + const int pooled_w, at::Tensor bottom_grad) { + const int output_size = num_rois * pooled_h * pooled_w * channels; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + top_grad.type(), "ROIPoolLaucherBackward", ([&] { + const scalar_t *top_diff = top_grad.data(); + const scalar_t *rois_data = rois.data(); + const int *argmax_data = argmax.data(); + scalar_t *bottom_diff = bottom_grad.data(); + + if (sizeof(scalar_t) == sizeof(double)) { + fprintf(stderr, "double is not supported\n"); + exit(-1); + } + + ROIPoolBackward + <<>>( + output_size, top_diff, rois_data, argmax_data, + scalar_t(spatial_scale), channels, height, width, pooled_h, + pooled_w, bottom_diff); + })); + THCudaCheck(cudaGetLastError()); + return 1; +} diff --git a/mmdet/ops/sigmoid_focal_loss/__init__.py b/mmdet/ops/sigmoid_focal_loss/__init__.py new file mode 100644 index 0000000..d0e5abd --- /dev/null +++ b/mmdet/ops/sigmoid_focal_loss/__init__.py @@ -0,0 +1,3 @@ +from .modules.sigmoid_focal_loss import SigmoidFocalLoss, sigmoid_focal_loss + +__all__ = ['SigmoidFocalLoss', 'sigmoid_focal_loss'] diff --git a/mmdet/ops/sigmoid_focal_loss/functions/__init__.py b/mmdet/ops/sigmoid_focal_loss/functions/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/mmdet/ops/sigmoid_focal_loss/functions/sigmoid_focal_loss.py 
b/mmdet/ops/sigmoid_focal_loss/functions/sigmoid_focal_loss.py new file mode 100644 index 0000000..803df41 --- /dev/null +++ b/mmdet/ops/sigmoid_focal_loss/functions/sigmoid_focal_loss.py @@ -0,0 +1,42 @@ +import torch.nn.functional as F +from torch.autograd import Function +from torch.autograd.function import once_differentiable + +from .. import sigmoid_focal_loss_cuda + + +class SigmoidFocalLossFunction(Function): + + @staticmethod + def forward(ctx, input, target, gamma=2.0, alpha=0.25, reduction='mean'): + ctx.save_for_backward(input, target) + num_classes = input.shape[1] + ctx.num_classes = num_classes + ctx.gamma = gamma + ctx.alpha = alpha + + loss = sigmoid_focal_loss_cuda.forward(input, target, num_classes, + gamma, alpha) + reduction_enum = F._Reduction.get_enum(reduction) + # none: 0, mean:1, sum: 2 + if reduction_enum == 0: + return loss + elif reduction_enum == 1: + return loss.mean() + elif reduction_enum == 2: + return loss.sum() + + @staticmethod + @once_differentiable + def backward(ctx, d_loss): + input, target = ctx.saved_tensors + num_classes = ctx.num_classes + gamma = ctx.gamma + alpha = ctx.alpha + d_loss = d_loss.contiguous() + d_input = sigmoid_focal_loss_cuda.backward(input, target, d_loss, + num_classes, gamma, alpha) + return d_input, None, None, None, None + + +sigmoid_focal_loss = SigmoidFocalLossFunction.apply diff --git a/mmdet/ops/sigmoid_focal_loss/modules/__init__.py b/mmdet/ops/sigmoid_focal_loss/modules/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/mmdet/ops/sigmoid_focal_loss/modules/sigmoid_focal_loss.py b/mmdet/ops/sigmoid_focal_loss/modules/sigmoid_focal_loss.py new file mode 100644 index 0000000..3caff39 --- /dev/null +++ b/mmdet/ops/sigmoid_focal_loss/modules/sigmoid_focal_loss.py @@ -0,0 +1,23 @@ +from torch import nn + +from ..functions.sigmoid_focal_loss import sigmoid_focal_loss + + +class SigmoidFocalLoss(nn.Module): + + def __init__(self, gamma, alpha): + super(SigmoidFocalLoss, self).__init__() + self.gamma = gamma + self.alpha = alpha + + def forward(self, logits, targets): + assert logits.is_cuda + loss = sigmoid_focal_loss(logits, targets, self.gamma, self.alpha) + return loss.sum() + + def __repr__(self): + tmpstr = self.__class__.__name__ + "(" + tmpstr += "gamma=" + str(self.gamma) + tmpstr += ", alpha=" + str(self.alpha) + tmpstr += ")" + return tmpstr diff --git a/mmdet/ops/sigmoid_focal_loss/setup.py b/mmdet/ops/sigmoid_focal_loss/setup.py new file mode 100644 index 0000000..a70c654 --- /dev/null +++ b/mmdet/ops/sigmoid_focal_loss/setup.py @@ -0,0 +1,12 @@ +from setuptools import setup +from torch.utils.cpp_extension import BuildExtension, CUDAExtension + +setup( + name='SigmoidFocalLoss', + ext_modules=[ + CUDAExtension('sigmoid_focal_loss_cuda', [ + 'src/sigmoid_focal_loss.cpp', + 'src/sigmoid_focal_loss_cuda.cu', + ]), + ], + cmdclass={'build_ext': BuildExtension}) diff --git a/mmdet/ops/sigmoid_focal_loss/src/sigmoid_focal_loss.cpp b/mmdet/ops/sigmoid_focal_loss/src/sigmoid_focal_loss.cpp new file mode 100644 index 0000000..2042751 --- /dev/null +++ b/mmdet/ops/sigmoid_focal_loss/src/sigmoid_focal_loss.cpp @@ -0,0 +1,43 @@ +// modify from +// https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/csrc/SigmoidFocalLoss.h +#include + +at::Tensor SigmoidFocalLoss_forward_cuda(const at::Tensor &logits, + const at::Tensor &targets, + const int num_classes, + const float gamma, const float alpha); + +at::Tensor SigmoidFocalLoss_backward_cuda(const at::Tensor &logits, + const 
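Assuming `sigmoid_focal_loss_cuda` has been built, the module form sums the loss over all samples and classes, while the functional form exposes the `reduction` argument. Targets are class indices with 1..num_classes for foreground and 0 for background, as the CUDA kernel below assumes. A minimal usage sketch (shapes are illustrative):

```python
import torch
from mmdet.ops.sigmoid_focal_loss import SigmoidFocalLoss, sigmoid_focal_loss

num_classes = 80
logits = torch.randn(512, num_classes, device='cuda', requires_grad=True)
targets = torch.randint(0, num_classes + 1, (512,), device='cuda', dtype=torch.long)

criterion = SigmoidFocalLoss(gamma=2.0, alpha=0.25)
loss = criterion(logits, targets)          # summed over all anchors and classes
loss.backward()

# Functional form with an explicit reduction.
loss_mean = sigmoid_focal_loss(logits, targets, 2.0, 0.25, 'mean')
print(loss.item(), loss_mean.item())
```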
at::Tensor &targets, + const at::Tensor &d_losses, + const int num_classes, + const float gamma, const float alpha); + +// Interface for Python +at::Tensor SigmoidFocalLoss_forward(const at::Tensor &logits, + const at::Tensor &targets, + const int num_classes, const float gamma, + const float alpha) { + if (logits.type().is_cuda()) { + return SigmoidFocalLoss_forward_cuda(logits, targets, num_classes, gamma, + alpha); + } +} + +at::Tensor SigmoidFocalLoss_backward(const at::Tensor &logits, + const at::Tensor &targets, + const at::Tensor &d_losses, + const int num_classes, const float gamma, + const float alpha) { + if (logits.type().is_cuda()) { + return SigmoidFocalLoss_backward_cuda(logits, targets, d_losses, + num_classes, gamma, alpha); + } +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("forward", &SigmoidFocalLoss_forward, + "SigmoidFocalLoss forward (CUDA)"); + m.def("backward", &SigmoidFocalLoss_backward, + "SigmoidFocalLoss backward (CUDA)"); +} diff --git a/mmdet/ops/sigmoid_focal_loss/src/sigmoid_focal_loss_cuda.cu b/mmdet/ops/sigmoid_focal_loss/src/sigmoid_focal_loss_cuda.cu new file mode 100644 index 0000000..aa1e4b9 --- /dev/null +++ b/mmdet/ops/sigmoid_focal_loss/src/sigmoid_focal_loss_cuda.cu @@ -0,0 +1,169 @@ +// modify from +// https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.cu + +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +// This file is modified from +// https://github.com/pytorch/pytorch/blob/master/modules/detectron/sigmoid_focal_loss_op.cu +// Cheng-Yang Fu +// cyfu@cs.unc.edu +#include +#include + +#include +#include +#include + +#include + +// TODO make it in a common file +#define CUDA_1D_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ + i += blockDim.x * gridDim.x) + +template +__global__ void SigmoidFocalLossForward(const int nthreads, + const scalar_t *logits, + const long *targets, + const int num_classes, + const float gamma, const float alpha, + const int num, scalar_t *losses) { + CUDA_1D_KERNEL_LOOP(i, nthreads) { + int n = i / num_classes; + int d = i % num_classes; // current class[0~79]; + int t = targets[n]; // target class [1~80]; + + // Decide it is positive or negative case. + scalar_t c1 = (t == (d + 1)); + scalar_t c2 = (t >= 0 & t != (d + 1)); + + scalar_t zn = (1.0 - alpha); + scalar_t zp = (alpha); + + // p = 1. / 1. + expf(-x); p = sigmoid(x) + scalar_t p = 1. / (1. + expf(-logits[i])); + + // (1-p)**gamma * log(p) where + scalar_t term1 = powf((1. - p), gamma) * logf(max(p, FLT_MIN)); + + // p**gamma * log(1-p) + scalar_t term2 = + powf(p, gamma) * + (-1. * logits[i] * (logits[i] >= 0) - + logf(1. + expf(logits[i] - 2. * logits[i] * (logits[i] >= 0)))); + + losses[i] = 0.0; + losses[i] += -c1 * term1 * zp; + losses[i] += -c2 * term2 * zn; + + } // CUDA_1D_KERNEL_LOOP +} // SigmoidFocalLossForward + +template +__global__ void SigmoidFocalLossBackward( + const int nthreads, const scalar_t *logits, const long *targets, + const scalar_t *d_losses, const int num_classes, const float gamma, + const float alpha, const int num, scalar_t *d_logits) { + CUDA_1D_KERNEL_LOOP(i, nthreads) { + int n = i / num_classes; + int d = i % num_classes; // current class[0~79]; + int t = targets[n]; // target class [1~80], 0 is background; + + // Decide it is positive or negative case. 
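+    // As in the forward kernel, c1 marks the positive class of this sample
+    // (targets are 1-based while d is 0-based) and c2 marks every other
+    // class of a valid (t >= 0) sample; the two terms below are weighted by
+    // alpha and (1 - alpha) respectively.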
+ scalar_t c1 = (t == (d + 1)); + scalar_t c2 = (t >= 0 & t != (d + 1)); + + scalar_t zn = (1.0 - alpha); + scalar_t zp = (alpha); + // p = 1. / 1. + expf(-x); p = sigmoid(x) + scalar_t p = 1. / (1. + expf(-logits[i])); + + // (1-p)**g * (1 - p - g*p*log(p) + scalar_t term1 = + powf((1. - p), gamma) * (1. - p - (p * gamma * logf(max(p, FLT_MIN)))); + + // (p**g) * (g*(1-p)*log(1-p) - p) + scalar_t term2 = + powf(p, gamma) * + ((-1. * logits[i] * (logits[i] >= 0) - + logf(1. + expf(logits[i] - 2. * logits[i] * (logits[i] >= 0)))) * + (1. - p) * gamma - + p); + d_logits[i] = 0.0; + d_logits[i] += -c1 * term1 * zp; + d_logits[i] += -c2 * term2 * zn; + d_logits[i] = d_logits[i] * d_losses[i]; + + } // CUDA_1D_KERNEL_LOOP +} // SigmoidFocalLossBackward + +at::Tensor SigmoidFocalLoss_forward_cuda(const at::Tensor &logits, + const at::Tensor &targets, + const int num_classes, + const float gamma, const float alpha) { + AT_ASSERTM(logits.type().is_cuda(), "logits must be a CUDA tensor"); + AT_ASSERTM(targets.type().is_cuda(), "targets must be a CUDA tensor"); + AT_ASSERTM(logits.dim() == 2, "logits should be NxClass"); + + const int num_samples = logits.size(0); + + auto losses = at::empty({num_samples, logits.size(1)}, logits.options()); + auto losses_size = num_samples * logits.size(1); + + dim3 grid(std::min(THCCeilDiv(losses_size, 512L), 4096L)); + dim3 block(512); + + if (losses.numel() == 0) { + THCudaCheck(cudaGetLastError()); + return losses; + } + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + logits.type(), "SigmoidFocalLoss_forward", [&] { + SigmoidFocalLossForward<<>>( + losses_size, logits.contiguous().data(), + targets.contiguous().data(), num_classes, gamma, alpha, + num_samples, losses.data()); + }); + THCudaCheck(cudaGetLastError()); + return losses; +} + +at::Tensor SigmoidFocalLoss_backward_cuda(const at::Tensor &logits, + const at::Tensor &targets, + const at::Tensor &d_losses, + const int num_classes, + const float gamma, + const float alpha) { + AT_ASSERTM(logits.type().is_cuda(), "logits must be a CUDA tensor"); + AT_ASSERTM(targets.type().is_cuda(), "targets must be a CUDA tensor"); + AT_ASSERTM(d_losses.type().is_cuda(), "d_losses must be a CUDA tensor"); + + AT_ASSERTM(logits.dim() == 2, "logits should be NxClass"); + + const int num_samples = logits.size(0); + AT_ASSERTM(logits.size(1) == num_classes, + "logits.size(1) should be num_classes"); + + auto d_logits = at::zeros({num_samples, num_classes}, logits.options()); + auto d_logits_size = num_samples * logits.size(1); + + dim3 grid(std::min(THCCeilDiv(d_logits_size, 512L), 4096L)); + dim3 block(512); + + if (d_logits.numel() == 0) { + THCudaCheck(cudaGetLastError()); + return d_logits; + } + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + logits.type(), "SigmoidFocalLoss_backward", [&] { + SigmoidFocalLossBackward<<>>( + d_logits_size, logits.contiguous().data(), + targets.contiguous().data(), + d_losses.contiguous().data(), num_classes, gamma, alpha, + num_samples, d_logits.data()); + }); + + THCudaCheck(cudaGetLastError()); + return d_logits; +} diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..b79eb4d --- /dev/null +++ b/setup.py @@ -0,0 +1,112 @@ +import os +import subprocess +import time +from setuptools import find_packages, setup + + +def readme(): + with open('README.md', encoding='utf-8') as f: + content = f.read() + return content + + +MAJOR = 0 +MINOR = 6 +PATCH = 0 +SUFFIX = '' +SHORT_VERSION = '{}.{}.{}{}'.format(MAJOR, MINOR, PATCH, SUFFIX) + +version_file = 'mmdet/version.py' + + +def 
get_git_hash(): + + def _minimal_ext_cmd(cmd): + # construct minimal environment + env = {} + for k in ['SYSTEMROOT', 'PATH', 'HOME']: + v = os.environ.get(k) + if v is not None: + env[k] = v + # LANGUAGE is used on win32 + env['LANGUAGE'] = 'C' + env['LANG'] = 'C' + env['LC_ALL'] = 'C' + out = subprocess.Popen( + cmd, stdout=subprocess.PIPE, env=env).communicate()[0] + return out + + try: + out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) + sha = out.strip().decode('ascii') + except OSError: + sha = 'unknown' + + return sha + + +def get_hash(): + if os.path.exists('.git'): + sha = get_git_hash()[:7] + elif os.path.exists(version_file): + try: + from mmdet.version import __version__ + sha = __version__.split('+')[-1] + except ImportError: + raise ImportError('Unable to get git version') + else: + sha = 'unknown' + + return sha + + +def write_version_py(): + content = """# GENERATED VERSION FILE +# TIME: {} + +__version__ = '{}' +short_version = '{}' +""" + sha = get_hash() + VERSION = SHORT_VERSION + '+' + sha + + with open(version_file, 'w') as f: + f.write(content.format(time.asctime(), VERSION, SHORT_VERSION)) + + +def get_version(): + with open(version_file, 'r') as f: + exec(compile(f.read(), version_file, 'exec')) + return locals()['__version__'] + + +if __name__ == '__main__': + write_version_py() + setup( + name='mmdet', + version=get_version(), + description='Open MMLab Detection Toolbox', + long_description=readme(), + keywords='computer vision, object detection', + url='https://github.com/open-mmlab/mmdetection', + packages=find_packages(exclude=('configs', 'tools', 'demo')), + package_data={'mmdet.ops': ['*/*.so']}, + classifiers=[ + 'Development Status :: 4 - Beta', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 2', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', + ], + license='GPLv3', + setup_requires=['pytest-runner'], + tests_require=['pytest'], + install_requires=[ + 'mmcv>=0.2.6', 'numpy', 'matplotlib', 'six', 'terminaltables', + 'pycocotools' + ], + zip_safe=False) diff --git a/tools/analyze_logs.py b/tools/analyze_logs.py new file mode 100644 index 0000000..c9f603f --- /dev/null +++ b/tools/analyze_logs.py @@ -0,0 +1,178 @@ +import argparse +import json +from collections import defaultdict + +import matplotlib.pyplot as plt +import numpy as np +import seaborn as sns + + +def cal_train_time(log_dicts, args): + for i, log_dict in enumerate(log_dicts): + print('{}Analyze train time of {}{}'.format('-' * 5, args.json_logs[i], + '-' * 5)) + all_times = [] + for epoch in log_dict.keys(): + if args.include_outliers: + all_times.append(log_dict[epoch]['time']) + else: + all_times.append(log_dict[epoch]['time'][1:]) + all_times = np.array(all_times) + epoch_ave_time = all_times.mean(-1) + slowest_epoch = epoch_ave_time.argmax() + fastest_epoch = epoch_ave_time.argmin() + std_over_epoch = epoch_ave_time.std() + print('slowest epoch {}, average time is {:.4f}'.format( + slowest_epoch + 1, epoch_ave_time[slowest_epoch])) + print('fastest epoch {}, average time is {:.4f}'.format( + fastest_epoch + 1, epoch_ave_time[fastest_epoch])) + print('time std over epochs is {:.4f}'.format(std_over_epoch)) + print('average iter time: {:.4f} s/iter'.format(np.mean(all_times))) + print() + + +def plot_curve(log_dicts, args): + if args.backend 
is not None: + plt.switch_backend(args.backend) + sns.set_style(args.style) + # if legend is None, use {filename}_{key} as legend + legend = args.legend + if legend is None: + legend = [] + for json_log in args.json_logs: + for metric in args.keys: + legend.append('{}_{}'.format(json_log, metric)) + assert len(legend) == (len(args.json_logs) * len(args.keys)) + metrics = args.keys + + num_metrics = len(metrics) + for i, log_dict in enumerate(log_dicts): + epochs = list(log_dict.keys()) + for j, metric in enumerate(metrics): + print('plot curve of {}, metric is {}'.format( + args.json_logs[i], metric)) + assert metric in log_dict[epochs[ + 0]], '{} does not contain metric {}'.format( + args.json_logs[i], metric) + + if 'mAP' in metric: + xs = np.arange(1, max(epochs) + 1) + ys = [] + for epoch in epochs: + ys += log_dict[epoch][metric] + ax = plt.gca() + ax.set_xticks(xs) + plt.xlabel('epoch') + plt.plot(xs, ys, label=legend[i * num_metrics + j], marker='o') + else: + xs = [] + ys = [] + num_iters_per_epoch = log_dict[epochs[0]]['iter'][-1] + for epoch in epochs: + iters = log_dict[epoch]['iter'] + if log_dict[epoch]['mode'][-1] == 'val': + iters = iters[:-1] + xs.append( + np.array(iters) + (epoch - 1) * num_iters_per_epoch) + ys.append(np.array(log_dict[epoch][metric][:len(iters)])) + xs = np.concatenate(xs) + ys = np.concatenate(ys) + plt.xlabel('iter') + plt.plot( + xs, ys, label=legend[i * num_metrics + j], linewidth=0.5) + plt.legend() + if args.title is not None: + plt.title(args.title) + if args.out is None: + plt.show() + else: + print('save curve to: {}'.format(args.out)) + plt.savefig(args.out) + plt.cla() + + +def add_plot_parser(subparsers): + parser_plt = subparsers.add_parser( + 'plot_curve', help='parser for plotting curves') + parser_plt.add_argument( + 'json_logs', + type=str, + nargs='+', + help='path of train log in json format') + parser_plt.add_argument( + '--keys', + type=str, + nargs='+', + default=['bbox_mAP'], + help='the metric that you want to plot') + parser_plt.add_argument('--title', type=str, help='title of figure') + parser_plt.add_argument( + '--legend', + type=str, + nargs='+', + default=None, + help='legend of each plot') + parser_plt.add_argument( + '--backend', type=str, default=None, help='backend of plt') + parser_plt.add_argument( + '--style', type=str, default='dark', help='style of plt') + parser_plt.add_argument('--out', type=str, default=None) + + +def add_time_parser(subparsers): + parser_time = subparsers.add_parser( + 'cal_train_time', + help='parser for computing the average time per training iteration') + parser_time.add_argument( + 'json_logs', + type=str, + nargs='+', + help='path of train log in json format') + parser_time.add_argument( + '--include-outliers', + action='store_true', + help='include the first value of every epoch when computing ' + 'the average time') + + +def parse_args(): + parser = argparse.ArgumentParser(description='Analyze Json Log') + # currently only support plot curve and calculate average train time + subparsers = parser.add_subparsers(dest='task', help='task parser') + add_plot_parser(subparsers) + add_time_parser(subparsers) + args = parser.parse_args() + return args + + +def load_json_logs(json_logs): + # load and convert json_logs to log_dict, key is epoch, value is a sub dict + # keys of sub dict is different metrics, e.g. 
memory, bbox_mAP + # value of sub dict is a list of corresponding values of all iterations + log_dicts = [dict() for _ in json_logs] + for json_log, log_dict in zip(json_logs, log_dicts): + with open(json_log, 'r') as log_file: + for l in log_file: + log = json.loads(l.strip()) + epoch = log.pop('epoch') + if epoch not in log_dict: + log_dict[epoch] = defaultdict(list) + for k, v in log.items(): + log_dict[epoch][k].append(v) + return log_dicts + + +def main(): + args = parse_args() + + json_logs = args.json_logs + for json_log in json_logs: + assert json_log.endswith('.json') + + log_dicts = load_json_logs(json_logs) + + eval(args.task)(log_dicts, args) + + +if __name__ == '__main__': + main() diff --git a/tools/coco_eval.py b/tools/coco_eval.py new file mode 100644 index 0000000..65e114c --- /dev/null +++ b/tools/coco_eval.py @@ -0,0 +1,28 @@ +from argparse import ArgumentParser + +from mmdet.core import coco_eval + + +def main(): + parser = ArgumentParser(description='COCO Evaluation') + parser.add_argument('result', help='result file path') + parser.add_argument('--ann', help='annotation file path') + parser.add_argument( + '--types', + type=str, + nargs='+', + choices=['proposal_fast', 'proposal', 'bbox', 'segm', 'keypoint'], + default=['bbox'], + help='result types') + parser.add_argument( + '--max-dets', + type=int, + nargs='+', + default=[100, 300, 1000], + help='proposal numbers, only used for recall evaluation') + args = parser.parse_args() + coco_eval(args.result, args.types, args.ann, args.max_dets) + + +if __name__ == '__main__': + main() diff --git a/tools/convert_datasets/pascal_voc.py b/tools/convert_datasets/pascal_voc.py new file mode 100644 index 0000000..5fb5cb4 --- /dev/null +++ b/tools/convert_datasets/pascal_voc.py @@ -0,0 +1,140 @@ +import argparse +import os.path as osp +import xml.etree.ElementTree as ET + +import mmcv +import numpy as np + +from mmdet.core import voc_classes + +label_ids = {name: i + 1 for i, name in enumerate(voc_classes())} + + +def parse_xml(args): + xml_path, img_path = args + tree = ET.parse(xml_path) + root = tree.getroot() + size = root.find('size') + w = int(size.find('width').text) + h = int(size.find('height').text) + bboxes = [] + labels = [] + bboxes_ignore = [] + labels_ignore = [] + for obj in root.findall('object'): + name = obj.find('name').text + label = label_ids[name] + difficult = int(obj.find('difficult').text) + bnd_box = obj.find('bndbox') + bbox = [ + int(bnd_box.find('xmin').text), + int(bnd_box.find('ymin').text), + int(bnd_box.find('xmax').text), + int(bnd_box.find('ymax').text) + ] + if difficult: + bboxes_ignore.append(bbox) + labels_ignore.append(label) + else: + bboxes.append(bbox) + labels.append(label) + if not bboxes: + bboxes = np.zeros((0, 4)) + labels = np.zeros((0, )) + else: + bboxes = np.array(bboxes, ndmin=2) - 1 + labels = np.array(labels) + if not bboxes_ignore: + bboxes_ignore = np.zeros((0, 4)) + labels_ignore = np.zeros((0, )) + else: + bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1 + labels_ignore = np.array(labels_ignore) + annotation = { + 'filename': img_path, + 'width': w, + 'height': h, + 'ann': { + 'bboxes': bboxes.astype(np.float32), + 'labels': labels.astype(np.int64), + 'bboxes_ignore': bboxes_ignore.astype(np.float32), + 'labels_ignore': labels_ignore.astype(np.int64) + } + } + return annotation + + +def cvt_annotations(devkit_path, years, split, out_file): + if not isinstance(years, list): + years = [years] + annotations = [] + for year in years: + filelist = osp.join(devkit_path, 
'VOC{}/ImageSets/Main/{}.txt'.format( + year, split)) + if not osp.isfile(filelist): + print('filelist does not exist: {}, skip voc{} {}'.format( + filelist, year, split)) + return + img_names = mmcv.list_from_file(filelist) + xml_paths = [ + osp.join(devkit_path, 'VOC{}/Annotations/{}.xml'.format( + year, img_name)) for img_name in img_names + ] + img_paths = [ + 'VOC{}/JPEGImages/{}.jpg'.format(year, img_name) + for img_name in img_names + ] + part_annotations = mmcv.track_progress(parse_xml, + list(zip(xml_paths, img_paths))) + annotations.extend(part_annotations) + mmcv.dump(annotations, out_file) + return annotations + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert PASCAL VOC annotations to mmdetection format') + parser.add_argument('devkit_path', help='pascal voc devkit path') + parser.add_argument('-o', '--out-dir', help='output path') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + devkit_path = args.devkit_path + out_dir = args.out_dir if args.out_dir else devkit_path + mmcv.mkdir_or_exist(out_dir) + + years = [] + if osp.isdir(osp.join(devkit_path, 'VOC2007')): + years.append('2007') + if osp.isdir(osp.join(devkit_path, 'VOC2012')): + years.append('2012') + if '2007' in years and '2012' in years: + years.append(['2007', '2012']) + if not years: + raise IOError('The devkit path {} contains neither "VOC2007" nor ' + '"VOC2012" subfolder'.format(devkit_path)) + for year in years: + if year == '2007': + prefix = 'voc07' + elif year == '2012': + prefix = 'voc12' + elif year == ['2007', '2012']: + prefix = 'voc0712' + for split in ['train', 'val', 'trainval']: + dataset_name = prefix + '_' + split + print('processing {} ...'.format(dataset_name)) + cvt_annotations(devkit_path, year, split, + osp.join(out_dir, dataset_name + '.pkl')) + if not isinstance(year, list): + dataset_name = prefix + '_test' + print('processing {} ...'.format(dataset_name)) + cvt_annotations(devkit_path, year, 'test', + osp.join(out_dir, dataset_name + '.pkl')) + print('Done!') + + +if __name__ == '__main__': + main() diff --git a/tools/dist_test.sh b/tools/dist_test.sh new file mode 100755 index 0000000..5f6abf1 --- /dev/null +++ b/tools/dist_test.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +PYTHON=${PYTHON:-"python"} + +CONFIG=$1 +CHECKPOINT=$2 +GPUS=$3 + +$PYTHON -m torch.distributed.launch --nproc_per_node=$GPUS \ + $(dirname "$0")/test.py $CONFIG $CHECKPOINT --launcher pytorch ${@:4} diff --git a/tools/dist_train.sh b/tools/dist_train.sh new file mode 100755 index 0000000..a6ed485 --- /dev/null +++ b/tools/dist_train.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +PYTHON=${PYTHON:-"python"} + +CONFIG=$1 +GPUS=$2 + +$PYTHON -m torch.distributed.launch --nproc_per_node=$GPUS \ + $(dirname "$0")/train.py $CONFIG --launcher pytorch ${@:3} diff --git a/tools/publish_model.py b/tools/publish_model.py new file mode 100644 index 0000000..39795f1 --- /dev/null +++ b/tools/publish_model.py @@ -0,0 +1,34 @@ +import argparse +import subprocess +import torch + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Process a checkpoint to be published') + parser.add_argument('in_file', help='input checkpoint filename') + parser.add_argument('out_file', help='output checkpoint filename') + args = parser.parse_args() + return args + + +def process_checkpoint(in_file, out_file): + checkpoint = torch.load(in_file, map_location='cpu') + # remove optimizer for smaller file size + if 'optimizer' in checkpoint: + del checkpoint['optimizer'] + # if it 
is necessary to remove some sensitive data in checkpoint['meta'], + # add the code here. + torch.save(checkpoint, out_file) + sha = subprocess.check_output(['sha256sum', out_file]).decode() + final_file = out_file.rstrip('.pth') + '-{}.pth'.format(sha[:8]) + subprocess.Popen(['mv', out_file, final_file]) + + +def main(): + args = parse_args() + process_checkpoint(args.in_file, args.out_file) + + +if __name__ == '__main__': + main() diff --git a/tools/slurm_test.sh b/tools/slurm_test.sh new file mode 100755 index 0000000..8950bc8 --- /dev/null +++ b/tools/slurm_test.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +JOB_NAME=$2 +CONFIG=$3 +CHECKPOINT=$4 +GPUS=${GPUS:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +PY_ARGS=${@:5} +SRUN_ARGS=${SRUN_ARGS:-""} + +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS} diff --git a/tools/slurm_train.sh b/tools/slurm_train.sh new file mode 100755 index 0000000..45474c4 --- /dev/null +++ b/tools/slurm_train.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +JOB_NAME=$2 +CONFIG=$3 +WORK_DIR=$4 +GPUS=${5:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${PY_ARGS:-"--validate"} + +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u tools/train.py ${CONFIG} --work_dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS} diff --git a/tools/test.py b/tools/test.py new file mode 100644 index 0000000..9719f45 --- /dev/null +++ b/tools/test.py @@ -0,0 +1,189 @@ +import argparse +import os.path as osp +import shutil +import tempfile + +import mmcv +import torch +import torch.distributed as dist +from mmcv.runner import load_checkpoint, get_dist_info +from mmcv.parallel import MMDataParallel, MMDistributedDataParallel + +from mmdet.apis import init_dist +from mmdet.core import results2json, coco_eval +from mmdet.datasets import build_dataloader, get_dataset +from mmdet.models import build_detector + + +def single_gpu_test(model, data_loader, show=False): + model.eval() + results = [] + dataset = data_loader.dataset + prog_bar = mmcv.ProgressBar(len(dataset)) + for i, data in enumerate(data_loader): + with torch.no_grad(): + result = model(return_loss=False, rescale=not show, **data) + results.append(result) + + if show: + model.module.show_result(data, result, dataset.img_norm_cfg) + + batch_size = data['img'][0].size(0) + for _ in range(batch_size): + prog_bar.update() + return results + + +def multi_gpu_test(model, data_loader, tmpdir=None): + model.eval() + results = [] + dataset = data_loader.dataset + rank, world_size = get_dist_info() + if rank == 0: + prog_bar = mmcv.ProgressBar(len(dataset)) + for i, data in enumerate(data_loader): + with torch.no_grad(): + result = model(return_loss=False, rescale=True, **data) + results.append(result) + + if rank == 0: + batch_size = data['img'][0].size(0) + for _ in range(batch_size * world_size): + prog_bar.update() + + # collect results from all ranks + results = collect_results(results, len(dataset), tmpdir) + + return results + + +def collect_results(result_part, size, tmpdir=None): 
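+    # Each rank dumps its partial result list into a shared tmpdir; rank 0
+    # then loads every part, interleaves them back into dataset order and
+    # drops the padding that the distributed sampler may have added.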
+ rank, world_size = get_dist_info() + # create a tmp dir if it is not specified + if tmpdir is None: + MAX_LEN = 512 + # 32 is whitespace + dir_tensor = torch.full( + (MAX_LEN, ), 32, dtype=torch.uint8, device='cuda') + if rank == 0: + tmpdir = tempfile.mkdtemp() + tmpdir = torch.tensor( + bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda') + dir_tensor[:len(tmpdir)] = tmpdir + dist.broadcast(dir_tensor, 0) + tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip() + else: + mmcv.mkdir_or_exist(tmpdir) + # dump the part result to the dir + mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank))) + dist.barrier() + # collect all parts + if rank != 0: + return None + else: + # load results of all parts from tmp dir + part_list = [] + for i in range(world_size): + part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i)) + part_list.append(mmcv.load(part_file)) + # sort the results + ordered_results = [] + for res in zip(*part_list): + ordered_results.extend(list(res)) + # the dataloader may pad some samples + ordered_results = ordered_results[:size] + # remove tmp dir + shutil.rmtree(tmpdir) + return ordered_results + + +def parse_args(): + parser = argparse.ArgumentParser(description='MMDet test detector') + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint', help='checkpoint file') + parser.add_argument('--out', help='output result file') + parser.add_argument( + '--eval', + type=str, + nargs='+', + choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'], + help='eval types') + parser.add_argument('--show', action='store_true', help='show results') + parser.add_argument('--tmpdir', help='tmp dir for writing some results') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): + raise ValueError('The output file must be a pkl file.') + + cfg = mmcv.Config.fromfile(args.config) + # set cudnn_benchmark + if cfg.get('cudnn_benchmark', False): + torch.backends.cudnn.benchmark = True + cfg.model.pretrained = None + cfg.data.test.test_mode = True + + # init distributed env first, since logger depends on the dist info. 
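+    # With '--launcher none' the test runs through MMDataParallel on a single
+    # GPU; any other launcher initializes torch.distributed so that rank and
+    # world size are available to the data loader and to collect_results().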
+ if args.launcher == 'none': + distributed = False + else: + distributed = True + init_dist(args.launcher, **cfg.dist_params) + + # build the dataloader + # TODO: support multiple images per gpu (only minor changes are needed) + dataset = get_dataset(cfg.data.test) + data_loader = build_dataloader( + dataset, + imgs_per_gpu=1, + workers_per_gpu=cfg.data.workers_per_gpu, + dist=distributed, + shuffle=False) + + # build the model and load checkpoint + model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg) + load_checkpoint(model, args.checkpoint, map_location='cpu') + + if not distributed: + model = MMDataParallel(model, device_ids=[0]) + outputs = single_gpu_test(model, data_loader, args.show) + else: + model = MMDistributedDataParallel(model.cuda()) + outputs = multi_gpu_test(model, data_loader, args.tmpdir) + + rank, _ = get_dist_info() + if args.out and rank == 0: + print('\nwriting results to {}'.format(args.out)) + mmcv.dump(outputs, args.out) + eval_types = args.eval + if eval_types: + print('Starting evaluate {}'.format(' and '.join(eval_types))) + if eval_types == ['proposal_fast']: + result_file = args.out + coco_eval(result_file, eval_types, dataset.coco) + else: + if not isinstance(outputs[0], dict): + result_file = args.out + '.json' + results2json(dataset, outputs, result_file) + coco_eval(result_file, eval_types, dataset.coco) + else: + for name in outputs[0]: + print('\nEvaluating {}'.format(name)) + outputs_ = [out[name] for out in outputs] + result_file = args.out + '.{}.json'.format(name) + results2json(dataset, outputs_, result_file) + coco_eval(result_file, eval_types, dataset.coco) + + +if __name__ == '__main__': + main() diff --git a/tools/train.py b/tools/train.py new file mode 100644 index 0000000..3e06d6f --- /dev/null +++ b/tools/train.py @@ -0,0 +1,95 @@ +from __future__ import division + +import argparse +from mmcv import Config + +from mmdet import __version__ +from mmdet.datasets import get_dataset +from mmdet.apis import (train_detector, init_dist, get_root_logger, + set_random_seed) +from mmdet.models import build_detector +import torch + + +def parse_args(): + parser = argparse.ArgumentParser(description='Train a detector') + parser.add_argument('config', help='train config file path') + parser.add_argument('--work_dir', help='the dir to save logs and models') + parser.add_argument( + '--resume_from', help='the checkpoint file to resume from') + parser.add_argument( + '--validate', + action='store_true', + help='whether to evaluate the checkpoint during training') + parser.add_argument( + '--gpus', + type=int, + default=1, + help='number of gpus to use ' + '(only applicable to non-distributed training)') + parser.add_argument('--seed', type=int, default=None, help='random seed') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + args = parser.parse_args() + + return args + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + # set cudnn_benchmark + if cfg.get('cudnn_benchmark', False): + torch.backends.cudnn.benchmark = True + # update configs according to CLI args + if args.work_dir is not None: + cfg.work_dir = args.work_dir + if args.resume_from is not None: + cfg.resume_from = args.resume_from + cfg.gpus = args.gpus + + # init distributed env first, since logger depends on the dist info. 
+ if args.launcher == 'none': + distributed = False + else: + distributed = True + init_dist(args.launcher, **cfg.dist_params) + + # init logger before other steps + logger = get_root_logger(cfg.log_level) + logger.info('Distributed training: {}'.format(distributed)) + + # set random seeds + if args.seed is not None: + logger.info('Set random seed to {}'.format(args.seed)) + set_random_seed(args.seed) + + model = build_detector( + cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg) + + train_dataset = get_dataset(cfg.data.train) + if cfg.checkpoint_config is not None: + # save mmdet version, config file content and class names in + # checkpoints as meta data + cfg.checkpoint_config.meta = dict( + mmdet_version=__version__, + config=cfg.text, + classes=train_dataset.CLASSES) + # add an attribute for visualization convenience + model.CLASSES = train_dataset.CLASSES + train_detector( + model, + train_dataset, + cfg, + distributed=distributed, + validate=args.validate, + logger=logger) + + +if __name__ == '__main__': + main() diff --git a/tools/upgrade_model_version.py b/tools/upgrade_model_version.py new file mode 100644 index 0000000..00bcdf4 --- /dev/null +++ b/tools/upgrade_model_version.py @@ -0,0 +1,42 @@ +import argparse +import re +from collections import OrderedDict + +import torch + + +def convert(in_file, out_file): + """Convert keys in checkpoints. + + There can be some breaking changes during the development of mmdetection, + and this tool is used for upgrading checkpoints trained with old versions + to the latest one. + """ + checkpoint = torch.load(in_file) + in_state_dict = checkpoint.pop('state_dict') + out_state_dict = OrderedDict() + for key, val in in_state_dict.items(): + # Use ConvModule instead of nn.Conv2d in RetinaNet + # cls_convs.0.weight -> cls_convs.0.conv.weight + m = re.search(r'(cls_convs|reg_convs).\d.(weight|bias)', key) + if m is not None: + param = m.groups()[1] + new_key = key.replace(param, 'conv.{}'.format(param)) + out_state_dict[new_key] = val + continue + + out_state_dict[key] = val + checkpoint['state_dict'] = out_state_dict + torch.save(checkpoint, out_file) + + +def main(): + parser = argparse.ArgumentParser(description='Upgrade model version') + parser.add_argument('in_file', help='input checkpoint file') + parser.add_argument('out_file', help='output checkpoint file') + args = parser.parse_args() + convert(args.in_file, args.out_file) + + +if __name__ == '__main__': + main() diff --git a/tools/voc_eval.py b/tools/voc_eval.py new file mode 100644 index 0000000..478ec3c --- /dev/null +++ b/tools/voc_eval.py @@ -0,0 +1,62 @@ +from argparse import ArgumentParser + +import mmcv +import numpy as np + +from mmdet import datasets +from mmdet.core import eval_map + + +def voc_eval(result_file, dataset, iou_thr=0.5): + det_results = mmcv.load(result_file) + gt_bboxes = [] + gt_labels = [] + gt_ignore = [] + for i in range(len(dataset)): + ann = dataset.get_ann_info(i) + bboxes = ann['bboxes'] + labels = ann['labels'] + if 'bboxes_ignore' in ann: + ignore = np.concatenate([ + np.zeros(bboxes.shape[0], dtype=np.bool), + np.ones(ann['bboxes_ignore'].shape[0], dtype=np.bool) + ]) + gt_ignore.append(ignore) + bboxes = np.vstack([bboxes, ann['bboxes_ignore']]) + labels = np.concatenate([labels, ann['labels_ignore']]) + gt_bboxes.append(bboxes) + gt_labels.append(labels) + if not gt_ignore: + gt_ignore = gt_ignore + if hasattr(dataset, 'year') and dataset.year == 2007: + dataset_name = 'voc07' + else: + dataset_name = dataset.CLASSES + eval_map( + 
det_results, + gt_bboxes, + gt_labels, + gt_ignore=gt_ignore, + scale_ranges=None, + iou_thr=iou_thr, + dataset=dataset_name, + print_summary=True) + + +def main(): + parser = ArgumentParser(description='VOC Evaluation') + parser.add_argument('result', help='result file path') + parser.add_argument('config', help='config file path') + parser.add_argument( + '--iou-thr', + type=float, + default=0.5, + help='IoU threshold for evaluation') + args = parser.parse_args() + cfg = mmcv.Config.fromfile(args.config) + test_dataset = mmcv.runner.obj_from_dict(cfg.data.test, datasets) + voc_eval(args.result, test_dataset, args.iou_thr) + + +if __name__ == '__main__': + main()
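
tools/analyze_logs.py above consumes the JSON logs written during training, one JSON object per line carrying at least "epoch", "iter" and "mode" plus the logged metrics. The sketch below shows how load_json_logs folds such lines into the epoch-keyed dict that plot_curve and cal_train_time expect; the metric values are made up for illustration.

    # Illustration only: two fake log lines in the format load_json_logs parses.
    import json
    from collections import defaultdict

    sample_lines = [
        '{"mode": "train", "epoch": 1, "iter": 50, "lr": 0.02, "time": 0.42, "loss": 1.31}',
        '{"mode": "train", "epoch": 1, "iter": 100, "lr": 0.02, "time": 0.40, "loss": 1.18}',
    ]

    log_dict = {}
    for line in sample_lines:
        log = json.loads(line)
        epoch = log.pop('epoch')
        log_dict.setdefault(epoch, defaultdict(list))
        for k, v in log.items():
            log_dict[epoch][k].append(v)

    print(log_dict[1]['loss'])  # [1.31, 1.18]
    print(log_dict[1]['time'])  # [0.42, 0.4]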