
Commit

Fix torchmetric bug (#41)
MiXaiLL76 authored Oct 9, 2024
1 parent 5a81428 commit 50561df
Showing 3 changed files with 207 additions and 5 deletions.
8 changes: 4 additions & 4 deletions faster_coco_eval/core/cocoeval.py
@@ -169,15 +169,15 @@ def _prepare(self):

         img_sizes = defaultdict(tuple)

-        def get_img_size_by_id(image_id) -> tuple:
+        def get_img_size_by_id(image_id, dataset: COCO) -> tuple:
             if img_sizes.get(image_id) is None:
-                t = self.cocoGt.imgs[image_id]
+                t = dataset.imgs[image_id]
                 img_sizes[image_id] = t["height"], t["width"]
             return img_sizes[image_id]

         for gt in gts:
             if p.compute_rle:
-                get_img_size_by_id(gt["image_id"])
+                get_img_size_by_id(gt["image_id"], self.cocoGt)

                 maskUtils.calculateRleForAllAnnotations(
                     gts,
@@ -206,7 +206,7 @@ def get_img_size_by_id(image_id) -> tuple:
                 )

             if p.compute_rle:
-                get_img_size_by_id(dt["image_id"])
+                get_img_size_by_id(dt["image_id"], self.cocoDt)

                 maskUtils.calculateRleForAllAnnotations(
                     dts,
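For context, here is a minimal standalone sketch (not the library's actual code) of the cached lookup after this change. The FakeCOCO container and the sample sizes below are hypothetical; the point is that get_img_size_by_id now reads from whichever dataset is passed in, so detection image sizes come from cocoDt rather than always from cocoGt:

from collections import defaultdict

class FakeCOCO:
    # Hypothetical stand-in for a COCO dataset; only the .imgs mapping matters here.
    def __init__(self, imgs):
        self.imgs = imgs  # {image_id: {"height": ..., "width": ...}}

coco_gt = FakeCOCO({42: {"height": 480, "width": 640}})
coco_dt = FakeCOCO({42: {"height": 480, "width": 640}, 73: {"height": 427, "width": 640}})

img_sizes = defaultdict(tuple)

def get_img_size_by_id(image_id, dataset) -> tuple:
    # Cache the (height, width) pair per image id, reading from the dataset
    # that actually holds the annotation being processed.
    if img_sizes.get(image_id) is None:
        t = dataset.imgs[image_id]
        img_sizes[image_id] = t["height"], t["width"]
    return img_sizes[image_id]

print(get_img_size_by_id(42, coco_gt))  # (480, 640), cached afterwards
print(get_img_size_by_id(73, coco_dt))  # (427, 640), looked up in coco_dt rather than coco_gt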
2 changes: 1 addition & 1 deletion faster_coco_eval/version.py
@@ -1,2 +1,2 @@
__version__ = "1.6.1"
__version__ = "1.6.2"
__author__ = "MiXaiLL76"
202 changes: 202 additions & 0 deletions tests/test_torchmetrics.py
@@ -1,13 +1,144 @@
import unittest
from copy import deepcopy
from unittest import TestCase

from parameterized import parameterized

try:
    import torch
    from lightning_utilities import apply_to_collection
    from torch import BoolTensor, IntTensor, Tensor
    from torchmetrics.detection.mean_ap import MeanAveragePrecision
except ImportError:
    raise unittest.SkipTest("Skipping all tests for torchmetrics.")

# fmt: off
_inputs = {
    "preds": [
        [
            {
                "boxes": Tensor([[258.15, 41.29, 606.41, 285.07]]),
                "scores": Tensor([0.236]),
                "labels": IntTensor([4]),
            },  # coco image id 42
            {
                "boxes": Tensor([
                    [61.00, 22.75, 565.00, 632.42],
                    [12.66, 3.32, 281.26, 275.23]]),
                "scores": Tensor([0.318, 0.726]),
                "labels": IntTensor([3, 2]),
            },  # coco image id 73
        ],
        [
            {
                "boxes": Tensor([
                    [87.87, 276.25, 384.29, 379.43],
                    [0.00, 3.66, 142.15, 316.06],
                    [296.55, 93.96, 314.97, 152.79],
                    [328.94, 97.05, 342.49, 122.98],
                    [356.62, 95.47, 372.33, 147.55],
                    [464.08, 105.09, 495.74, 146.99],
                    [276.11, 103.84, 291.44, 150.72],
                ]),
                "scores": Tensor([0.546, 0.3, 0.407,
                                  0.611, 0.335, 0.805, 0.953]),
                "labels": IntTensor([4, 1, 0, 0, 0, 0, 0]),
            },  # coco image id 74
            {
                "boxes": Tensor([
                    [72.92, 45.96, 91.23, 80.57],
                    [45.17, 45.34, 66.28, 79.83],
                    [82.28, 47.04, 99.66, 78.50],
                    [59.96, 46.17, 80.35, 80.48],
                    [75.29, 23.01, 91.85, 50.85],
                    [71.14, 1.10, 96.96, 28.33],
                    [61.34, 55.23, 77.14, 79.57],
                    [41.17, 45.78, 60.99, 78.48],
                    [56.18, 44.80, 64.42, 56.25],
                ]),
                "scores": Tensor([
                    0.532,
                    0.204,
                    0.782,
                    0.202,
                    0.883,
                    0.271,
                    0.561,
                    0.204 + 1e-8,  # Sorting is currently unstable: equal scores can be assigned different indexes, so nudge this value to keep the order deterministic.  # noqa: E501
                    0.349
                ]),
                "labels": IntTensor([
                    49,
                    49,
                    49,
                    49,
                    49,
                    49,
                    49,
                    49,
                    49
                ]),
            },  # coco image id 987 category_id 49
        ],
    ],
    "target": [
        [
            {
                "boxes": Tensor([[214.1500, 41.2900, 562.4100, 285.0700]]),
                "labels": IntTensor([4]),
            },  # coco image id 42
            {
                "boxes": Tensor([
                    [13.00, 22.75, 548.98, 632.42],
                    [1.66, 3.32, 270.26, 275.23],
                ]),
                "labels": IntTensor([2, 2]),
            },  # coco image id 73
        ],
        [
            {
                "boxes": Tensor([
                    [61.87, 276.25, 358.29, 379.43],
                    [2.75, 3.66, 162.15, 316.06],
                    [295.55, 93.96, 313.97, 152.79],
                    [326.94, 97.05, 340.49, 122.98],
                    [356.62, 95.47, 372.33, 147.55],
                    [462.08, 105.09, 493.74, 146.99],
                    [277.11, 103.84, 292.44, 150.72],
                ]),
                "labels": IntTensor([4, 1, 0, 0, 0, 0, 0]),
            },  # coco image id 74
            {
                "boxes": Tensor([
                    [72.92, 45.96, 91.23, 80.57],
                    [50.17, 45.34, 71.28, 79.83],
                    [81.28, 47.04, 98.66, 78.50],
                    [63.96, 46.17, 84.35, 80.48],
                    [75.29, 23.01, 91.85, 50.85],
                    [56.39, 21.65, 75.66, 45.54],
                    [73.14, 1.10, 98.96, 28.33],
                    [62.34, 55.23, 78.14, 79.57],
                    [44.17, 45.78, 63.99, 78.48],
                    [58.18, 44.80, 66.42, 56.25],
                ]),
                "labels": IntTensor([
                    49,
                    49,
                    49,
                    49,
                    49,
                    49,
                    49,
                    49,
                    49,
                    49
                ]),
            },  # coco image id 987 category_id 49
        ],
    ],
}
# fmt: on


class TestTorchmetricsLib(TestCase):
    def setUp(self):
@@ -98,6 +229,77 @@ def test_evaluate(self):

        self.assertDictEqual(result, self.valid_result)

    def test_segm_iou_empty_gt_mask(self):
        """Test empty ground truths."""
        backend = "faster_coco_eval"
        metric = MeanAveragePrecision(iou_type="segm", backend=backend)
        metric.update(
            [
                {
                    "masks": torch.randint(0, 1, (1, 10, 10)).bool(),
                    "scores": Tensor([0.5]),
                    "labels": IntTensor([4]),
                }
            ],
            [{"masks": Tensor([]), "labels": IntTensor([])}],
        )
        metric.compute()

    @parameterized.expand([False, True])
    def test_average_argument(self, class_metrics):
        """Test that the average argument works.

        Calculating macro on inputs that only have one label should be
        the same as micro. Calculating class metrics should be the same
        regardless of the average argument.
        """
        backend = "faster_coco_eval"

        if class_metrics:
            _preds = _inputs["preds"]
            _target = _inputs["target"]
        else:
            _preds = apply_to_collection(
                deepcopy(_inputs["preds"]),
                IntTensor,
                lambda x: torch.ones_like(x),
            )
            _target = apply_to_collection(
                deepcopy(_inputs["target"]),
                IntTensor,
                lambda x: torch.ones_like(x),
            )

        metric_macro = MeanAveragePrecision(
            average="macro", class_metrics=class_metrics, backend=backend
        )
        metric_macro.update(_preds[0], _target[0])
        metric_macro.update(_preds[1], _target[1])
        result_macro = metric_macro.compute()

        metric_micro = MeanAveragePrecision(
            average="micro", class_metrics=class_metrics, backend=backend
        )
        metric_micro.update(_inputs["preds"][0], _inputs["target"][0])
        metric_micro.update(_inputs["preds"][1], _inputs["target"][1])
        result_micro = metric_micro.compute()

        if class_metrics:
            assert torch.allclose(
                result_macro["map_per_class"], result_micro["map_per_class"]
            )
            assert torch.allclose(
                result_macro["mar_100_per_class"],
                result_micro["mar_100_per_class"],
            )
        else:
            for key in result_macro:
                if key == "classes":
                    continue
                assert torch.allclose(result_macro[key], result_micro[key])


if __name__ == "__main__":
    unittest.main()
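As a usage reference (a minimal sketch, not part of this commit), this is roughly how the torchmetrics integration exercised by the tests above is driven from user code; the box, score, and label values are made up:

from torch import IntTensor, Tensor
from torchmetrics.detection.mean_ap import MeanAveragePrecision

# Select the faster_coco_eval backend, as the tests above do.
metric = MeanAveragePrecision(iou_type="bbox", backend="faster_coco_eval")

preds = [{
    "boxes": Tensor([[258.0, 41.0, 606.0, 285.0]]),  # xyxy format, made-up values
    "scores": Tensor([0.54]),
    "labels": IntTensor([4]),
}]
target = [{
    "boxes": Tensor([[214.0, 41.0, 562.0, 285.0]]),
    "labels": IntTensor([4]),
}]

metric.update(preds, target)
result = metric.compute()
print(result["map"], result["map_50"])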
