Sweep the ❌ models again (no train) (#690)
* Sweep the previously failing models

* Register the converted_end_to_end mark

* Skip accuracy calculation for retinanet_resnet50_fpn_v2 because the before/after output shapes differ
swimdi authored Dec 30, 2024
1 parent 7d2e33c commit 3be2fd5
Showing 6 changed files with 33 additions and 8 deletions.
2 changes: 1 addition & 1 deletion tests/conftest.py
@@ -188,7 +188,7 @@ def compile_and_run(device, reset_torch_dynamo, request):
     if len(option._out_fx_graphs) > 0:
         option._out_fx_graphs[0].print_tabular()
 
-    if model_name not in ["speecht5-tts", "ssd300_vgg16"]:
+    if model_name not in ["speecht5-tts", "ssd300_vgg16", "retinanet_resnet50_fpn_v2"]:
         accuracy = calculate_accuracy(outputs, outputs_after)
         if accuracy:
             comp_runtime_metrics["accuracy"] = accuracy
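The skip above only bypasses the final numerical comparison; the model is still compiled and run. `calculate_accuracy` is defined elsewhere in the test suite, so the following is only a minimal sketch of the kind of before/after comparison such a helper typically performs (the name `pcc_between` and the use of a Pearson correlation are assumptions, not the repository's implementation):

```python
import torch

def pcc_between(golden: torch.Tensor, compiled: torch.Tensor) -> float:
    # Hypothetical helper: Pearson correlation between the eager ("before") and
    # compiled ("after") outputs. It presumes both tensors share a shape, which
    # is exactly what retinanet_resnet50_fpn_v2 currently violates.
    stacked = torch.stack([golden.flatten().float(), compiled.flatten().float()])
    return torch.corrcoef(stacked)[0, 1].item()

# Identical outputs give a correlation of 1.0.
out = torch.randn(4, 8)
assert abs(pcc_between(out, out.clone()) - 1.0) < 1e-6
```
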
6 changes: 2 additions & 4 deletions tests/models/torchvision/test_torchvision_object_detection.py
@@ -42,10 +42,8 @@ def _load_inputs(self):
     [
         ("ssd300_vgg16", "SSD300_VGG16_Weights"),
         ("ssdlite320_mobilenet_v3_large", "SSDLite320_MobileNet_V3_Large_Weights"),
-        pytest.param(("retinanet_resnet50_fpn", "RetinaNet_ResNet50_FPN_Weights"), marks=pytest.mark.compilation_xfail),
-        pytest.param(
-            ("retinanet_resnet50_fpn_v2", "RetinaNet_ResNet50_FPN_V2_Weights"), marks=pytest.mark.compilation_xfail
-        ),
+        ("retinanet_resnet50_fpn", "RetinaNet_ResNet50_FPN_Weights"),
+        ("retinanet_resnet50_fpn_v2", "RetinaNet_ResNet50_FPN_V2_Weights"),
     ],
 )
 def test_torchvision_object_detection(record_property, model_info, mode):
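The only change here is dropping the `pytest.param(..., marks=pytest.mark.compilation_xfail)` wrappers: once a model compiles cleanly, a plain tuple is enough. A small illustration of the difference (the model names below are placeholders, not entries from this file):

```python
import pytest

@pytest.mark.parametrize(
    "model_info",
    [
        # Plain tuple: an ordinary test case whose compiled run must pass.
        ("passing_model", "PassingModelWeights"),
        # Wrapped form: kept only while the compiled run is expected to fail.
        pytest.param(("failing_model", "FailingModelWeights"), marks=pytest.mark.compilation_xfail),
    ],
)
def test_example(model_info):
    name, weights = model_info
    assert isinstance(name, str) and isinstance(weights, str)
```
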
1 change: 1 addition & 0 deletions tests/pytest.ini
@@ -2,3 +2,4 @@
 markers =
     compilation_xfail: marks tests with compiled run as xfail but does not change torch run
     skip_platform: marks tests that are not compatible with specified platform
+    converted_end_to_end: marks tests that all aten ops are converted to ttnn ops
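
Registering the marker keeps pytest from warning that it is unknown. A minimal sketch of how a test might opt in (the test name and body here are hypothetical):

```python
import pytest

@pytest.mark.converted_end_to_end  # registered in tests/pytest.ini above
def test_hypothetical_fully_converted_model():
    # Marks this test as one whose traced graph lowers every aten op to ttnn ops.
    ...
```
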
6 changes: 4 additions & 2 deletions torch_ttnn/passes/lowering/to_tt_guard.py
@@ -79,8 +79,9 @@
 # EXTRA BLOCKLIST OF retinanet_resnet50_fpn
 ############################################################
 # Statically allocated circular buffers on core range [(x=0,y=0) - (x=0,y=0)] grow to 3580704 B which is beyond max L1 size of 1499136 B
-# TODO: not pass yet
-
+aten_aten_stack_default = [
+    ["List[Tensor] tensors = [<[13600]>, <[13600]>, <[13600]>, <[13600]>]", "int dim = 1"],
+]
 
 ############################################################
 # EXTRA BLOCKLIST OF retinanet_resnet50_fpn_v2
@@ -92,6 +93,7 @@

 GUARD[torch.ops.aten.gt.Scalar] = partial(guard_aten, aten_gt_Scalar_blocklist)
 GUARD[torch.ops.aten.cumsum.default] = partial(guard_aten, aten_cumsum_default_blocklist)
+GUARD[torch.ops.aten.stack.default] = partial(guard_aten, aten_aten_stack_default)
 
 
 def can_lowering_to_ttnn(node):
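The new `aten_aten_stack_default` entry and its `GUARD` registration follow the existing pattern: each aten op maps to `partial(guard_aten, <blocklist>)`, and `can_lowering_to_ttnn` consults that table. The repository's `guard_aten` is not shown in this diff, so the following is only a sketch of the idea, assuming the guard compares a stringified argument signature against the blocklist:

```python
from functools import partial

# Sketch only: a per-op blocklist of stringified argument signatures.
aten_stack_blocklist = [
    ["List[Tensor] tensors = [<[13600]>, <[13600]>, <[13600]>, <[13600]>]", "int dim = 1"],
]

def guard_aten_sketch(blocklist, arg_signature):
    # Allow lowering unless the node's argument signature matches a blocked one.
    return arg_signature not in blocklist

GUARD_SKETCH = {
    "aten.stack.default": partial(guard_aten_sketch, aten_stack_blocklist),
}

# A stack of four [13600] tensors along dim=1 would be kept on the fallback path:
sig = ["List[Tensor] tensors = [<[13600]>, <[13600]>, <[13600]>, <[13600]>]", "int dim = 1"]
assert GUARD_SKETCH["aten.stack.default"](sig) is False
```
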
22 changes: 22 additions & 0 deletions torch_ttnn/passes/lowering/to_tt_guard_autogen.py
@@ -286,6 +286,28 @@
"List[int] output_padding = [0, 0]",
"int groups = 960",
],
[
"Tensor<[1, 768, 3000]> input = ?",
"Tensor<[768, 768, 3]> weight = ?",
"Optional[Tensor]<[768]> bias = ?",
"List[int] stride = [2]",
"List[int] padding = [1]",
"List[int] dilation = [1]",
"bool transposed = False",
"List[int] output_padding = [0]",
"int groups = 1",
],
[
"Tensor<[1, 320, 64, 64]> input = ?",
"Tensor<[320, 320, 3, 3]> weight = ?",
"Optional[Tensor]<[320]> bias = ?",
"List[int] stride = [1, 1]",
"List[int] padding = [1, 1]",
"List[int] dilation = [1, 1]",
"bool transposed = False",
"List[int] output_padding = [0, 0]",
"int groups = 1",
],
# TODO(tt-metal#16173): weight_matrix_width_ntiles % weight_block_w_ntiles == 0
[
"Tensor<[1, 1232, 14, 14]> input = ?",
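Both new entries describe `aten.convolution` calls that stay on the fallback path. For reference, the first entry corresponds roughly to the following PyTorch call, with the shapes taken verbatim from the blocklist entry (the originating model is not named in the diff):

```python
import torch
import torch.nn.functional as F

# 1-D convolution over a [1, 768, 3000] input with a [768, 768, 3] weight,
# stride 2, padding 1, dilation 1, groups 1 -- the signature of the first
# newly blocked aten.convolution entry.
x = torch.randn(1, 768, 3000)
w = torch.randn(768, 768, 3)
b = torch.randn(768)
y = F.conv1d(x, w, b, stride=2, padding=1, dilation=1, groups=1)
assert y.shape == (1, 768, 1500)
```
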
4 changes: 3 additions & 1 deletion torch_ttnn/passes/lowering/to_tt_pass.py
@@ -706,7 +706,9 @@ def reshape_1d(code, args=args, kwargs=kwargs):

         if node.target == torch.ops.aten.repeat.default:
             tensor, sizes = args
-            shape = tensor.meta["val"].size()
+            shape = get_shape(gm, tensor)
+            if shape is None:
+                return None
 
             if np.prod(sizes) == 1:
                 return tensor
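The rewritten branch asks `get_shape` for the tensor's shape and gives up on the rewrite (returns `None`) when the shape is unknown, instead of assuming `meta["val"]` is always present. The `np.prod(sizes) == 1` short-circuit that follows relies on a simple identity, illustrated below:

```python
import numpy as np
import torch

# repeat() with every multiplier equal to 1 is the identity, so the lowering
# pass can return the original tensor instead of emitting a repeat op.
t = torch.randn(2, 3)
sizes = [1, 1]
assert np.prod(sizes) == 1
assert torch.equal(t.repeat(*sizes), t)
```
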
