### Ticket
[Link to Github Issue](#11512)

### Problem description
We need sweep tests that exercise how ops behave when the input is sharded.

### What's changed
Added sweep tests for multiple ops with sharded input:
- logical_and
- logical_or
- logical_xor

Also updated unit tests with failing situations.

### Pass rates for new sweeps
- sweeps/eltwise/binary/logical_and/logical_and_sharded.py: 210 fail, 1326 pass (86%)
- sweeps/eltwise/binary/logical_or/logical_or_sharded.py: 253 fail, 1283 pass (83%)
- sweeps/eltwise/binary/logical_xor/logical_xor_sharded.py: 1207 fail, 329 pass (21%)

### Checklist
- [X] Post commit CI passes: https://github.com/tenstorrent/tt-metal/actions/runs/12431640958
- [X] Sweep tests pass
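For reference, each sweep ran 1536 vectors, and the percentages above are pass / (pass + fail); a quick check of the arithmetic:

```python
# Quick check of the reported pass rates: rate = pass / (pass + fail).
# Each sweep ran 1536 vectors (e.g. 210 + 1326 for logical_and).
results = {"logical_and": (1326, 210), "logical_or": (1283, 253), "logical_xor": (329, 1207)}
for op, (passed, failed) in results.items():
    print(f"{op}: {passed / (passed + failed):.1%}")
# logical_and: 86.3%, logical_or: 83.5%, logical_xor: 21.4%
```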
Commit f6dcf81 (parent 608d8be): 4 changed files, 363 additions, 0 deletions.
### tests/sweep_framework/sweeps/eltwise/binary/logical_and/logical_and_sharded.py (120 additions, 0 deletions)
```python
# SPDX-FileCopyrightText: © 2024 Tenstorrent Inc.

# SPDX-License-Identifier: Apache-2.0

from typing import Optional, Tuple
from functools import partial

import math
import torch
import random
import ttnn
from tests.sweep_framework.sweep_utils.utils import gen_shapes, tensor_to_dtype, sanitize_shape_rm
from tests.sweep_framework.sweep_utils.sharding_utils import gen_sharded_spec_unary, parse_sharding_spec
from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt

from tests.ttnn.utils_for_testing import assert_equal, check_with_pcc, start_measuring_time, stop_measuring_time
from models.utility_functions import torch_random

# Override the default timeout in seconds for hang detection.
TIMEOUT = 30

random.seed(0)

# Parameters provided to the test vector generator are defined here.
# They are defined as dict-type suites that contain the arguments to the run function as keys, and lists of possible inputs as values.
# Each suite has a key name (in this case "nightly") which will associate the test vectors to this specific suite of inputs.
# Developers can create their own generator functions and pass them to the parameters as inputs.
parameters = {
    "nightly": {
        "input_spec": gen_sharded_spec_unary(8, layouts=["TILE_LAYOUT"]),
        "input_a_dtype": [ttnn.bfloat16, ttnn.bfloat8_b],
        "input_b_dtype": [ttnn.bfloat16, ttnn.bfloat8_b],
        "output_dtype": [ttnn.bfloat16, ttnn.bfloat8_b],
    },
}


# invalidate_vector is called during the generation phase, where each vector will be passed in.
# If invalidated, the vector will still be stored but will be skipped.
# Returns (False, None) if the vector is valid, and (True, str) with a reason for invalidation if it is invalid.
def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]:
    input_shape, X, Y, sharding_strategy, _, _, input_layout = test_vector["input_spec"].values()
    pre_sharded_height = math.prod(input_shape[:-1])
    pre_sharded_width = input_shape[-1]

    if input_layout == "ROW_MAJOR_LAYOUT":
        return True, "Input to eltwise binary must be tilized"

    if input_layout == "ROW_MAJOR_LAYOUT" and test_vector["input_a_dtype"] == ttnn.bfloat8_b:
        return True, "bfloat8_b is only supported on tiled layout"

    return False, None


# These are the run instructions for the test, defined by the developer.
# The run function must take the above-defined parameters as inputs.
# The runner will call this run function with each test vector, and the returned results from this function will be stored.
# If you defined a device_mesh_fixture above, the object you yielded will be passed into this function as 'device'. Otherwise, it will be the default ttnn device opened by the infra.
def run(
    input_spec,
    input_a_dtype,
    input_b_dtype,
    output_dtype,
    *,
    device,
) -> list:
    data_seed = random.randint(0, 20000000)
    torch.manual_seed(data_seed)

    (
        input_shape,
        core_grid,
        sharding_strategy,
        shard_orientation,
        tensor_hw_as_shard_shape,
        input_layout,
    ) = parse_sharding_spec(input_spec)

    if input_layout == ttnn.ROW_MAJOR_LAYOUT:
        input_shape = sanitize_shape_rm(input_shape)

    torch_input_tensor_a = gen_func_with_cast_tt(
        partial(torch_random, low=-100, high=100, dtype=torch.float32), input_a_dtype
    )(input_shape)
    torch_input_tensor_b = gen_func_with_cast_tt(
        partial(torch_random, low=-100, high=100, dtype=torch.float32), input_b_dtype
    )(input_shape)

    torch_output_tensor = tensor_to_dtype(torch.logical_and(torch_input_tensor_a, torch_input_tensor_b), output_dtype)

    sharded_config = ttnn.create_sharded_memory_config_(
        shape=input_shape,
        core_grid=core_grid,
        strategy=sharding_strategy,
        orientation=shard_orientation,
        use_height_and_width_as_shard_shape=tensor_hw_as_shard_shape,
    )

    input_tensor_a = ttnn.from_torch(
        torch_input_tensor_a,
        dtype=input_a_dtype,
        layout=input_layout,
        device=device,
        memory_config=sharded_config,
    )
    input_tensor_b = ttnn.from_torch(
        torch_input_tensor_b,
        dtype=input_b_dtype,
        layout=input_layout,
        device=device,
        memory_config=sharded_config,
    )
    start_time = start_measuring_time()
    output_tensor = ttnn.logical_and(input_tensor_a, input_tensor_b, memory_config=sharded_config, dtype=output_dtype)
    output_tensor = ttnn.to_torch(output_tensor)
    e2e_perf = stop_measuring_time(start_time)

    # pcc = assert_equal(torch_output_tensor, output_tensor)
    pcc = check_with_pcc(torch_output_tensor, output_tensor, 0.999)
    return [pcc, e2e_perf]
```
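Note that the result check uses `check_with_pcc` with a 0.999 threshold rather than the exact `assert_equal` (left commented out). As a rough illustration of what a Pearson-correlation check computes — this is a minimal sketch, not the actual helper in `tests.ttnn.utils_for_testing`:

```python
import torch


def pcc_check_sketch(expected: torch.Tensor, actual: torch.Tensor, threshold: float = 0.999):
    # Flatten and promote to float32 so bfloat16/bfloat8_b outputs compare cleanly.
    x = expected.flatten().to(torch.float32)
    y = actual.flatten().to(torch.float32)
    vx, vy = x - x.mean(), y - y.mean()
    denom = vx.norm() * vy.norm()
    if denom == 0:  # constant tensors (e.g. an all-zero logical output) have undefined PCC
        return bool(torch.equal(x, y)), "PCC undefined for constant tensors"
    pcc = (vx * vy).sum() / denom
    return bool(pcc >= threshold), f"PCC: {pcc.item():.6f}"
```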
### tests/sweep_framework/sweeps/eltwise/binary/logical_or/logical_or_sharded.py (120 additions, 0 deletions)
```python
# SPDX-FileCopyrightText: © 2024 Tenstorrent Inc.

# SPDX-License-Identifier: Apache-2.0

from typing import Optional, Tuple
from functools import partial

import math
import torch
import random
import ttnn
from tests.sweep_framework.sweep_utils.utils import gen_shapes, tensor_to_dtype, sanitize_shape_rm
from tests.sweep_framework.sweep_utils.sharding_utils import gen_sharded_spec_unary, parse_sharding_spec
from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt

from tests.ttnn.utils_for_testing import assert_equal, check_with_pcc, start_measuring_time, stop_measuring_time
from models.utility_functions import torch_random

# Override the default timeout in seconds for hang detection.
TIMEOUT = 30

random.seed(0)

# Parameters provided to the test vector generator are defined here.
# They are defined as dict-type suites that contain the arguments to the run function as keys, and lists of possible inputs as values.
# Each suite has a key name (in this case "nightly") which will associate the test vectors to this specific suite of inputs.
# Developers can create their own generator functions and pass them to the parameters as inputs.
parameters = {
    "nightly": {
        "input_spec": gen_sharded_spec_unary(8, layouts=["TILE_LAYOUT"]),
        "input_a_dtype": [ttnn.bfloat16, ttnn.bfloat8_b],
        "input_b_dtype": [ttnn.bfloat16, ttnn.bfloat8_b],
        "output_dtype": [ttnn.bfloat16, ttnn.bfloat8_b],
    },
}


# invalidate_vector is called during the generation phase, where each vector will be passed in.
# If invalidated, the vector will still be stored but will be skipped.
# Returns (False, None) if the vector is valid, and (True, str) with a reason for invalidation if it is invalid.
def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]:
    input_shape, X, Y, sharding_strategy, _, _, input_layout = test_vector["input_spec"].values()
    pre_sharded_height = math.prod(input_shape[:-1])
    pre_sharded_width = input_shape[-1]

    if input_layout == "ROW_MAJOR_LAYOUT":
        return True, "Input to eltwise binary must be tilized"

    if input_layout == "ROW_MAJOR_LAYOUT" and test_vector["input_a_dtype"] == ttnn.bfloat8_b:
        return True, "bfloat8_b is only supported on tiled layout"

    return False, None


# These are the run instructions for the test, defined by the developer.
# The run function must take the above-defined parameters as inputs.
# The runner will call this run function with each test vector, and the returned results from this function will be stored.
# If you defined a device_mesh_fixture above, the object you yielded will be passed into this function as 'device'. Otherwise, it will be the default ttnn device opened by the infra.
def run(
    input_spec,
    input_a_dtype,
    input_b_dtype,
    output_dtype,
    *,
    device,
) -> list:
    data_seed = random.randint(0, 20000000)
    torch.manual_seed(data_seed)

    (
        input_shape,
        core_grid,
        sharding_strategy,
        shard_orientation,
        tensor_hw_as_shard_shape,
        input_layout,
    ) = parse_sharding_spec(input_spec)

    if input_layout == ttnn.ROW_MAJOR_LAYOUT:
        input_shape = sanitize_shape_rm(input_shape)

    torch_input_tensor_a = gen_func_with_cast_tt(
        partial(torch_random, low=-100, high=100, dtype=torch.float32), input_a_dtype
    )(input_shape)
    torch_input_tensor_b = gen_func_with_cast_tt(
        partial(torch_random, low=-100, high=100, dtype=torch.float32), input_b_dtype
    )(input_shape)

    torch_output_tensor = tensor_to_dtype(torch.logical_or(torch_input_tensor_a, torch_input_tensor_b), output_dtype)

    sharded_config = ttnn.create_sharded_memory_config_(
        shape=input_shape,
        core_grid=core_grid,
        strategy=sharding_strategy,
        orientation=shard_orientation,
        use_height_and_width_as_shard_shape=tensor_hw_as_shard_shape,
    )

    input_tensor_a = ttnn.from_torch(
        torch_input_tensor_a,
        dtype=input_a_dtype,
        layout=input_layout,
        device=device,
        memory_config=sharded_config,
    )
    input_tensor_b = ttnn.from_torch(
        torch_input_tensor_b,
        dtype=input_b_dtype,
        layout=input_layout,
        device=device,
        memory_config=sharded_config,
    )
    start_time = start_measuring_time()
    output_tensor = ttnn.logical_or(input_tensor_a, input_tensor_b, memory_config=sharded_config, dtype=output_dtype)
    output_tensor = ttnn.to_torch(output_tensor)
    e2e_perf = stop_measuring_time(start_time)

    # pcc = assert_equal(torch_output_tensor, output_tensor)
    pcc = check_with_pcc(torch_output_tensor, output_tensor, 0.999)
    return [pcc, e2e_perf]
```
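The sweep infra normally drives `run`, but for debugging a single vector you could call it directly. A hypothetical smoke run, assuming `gen_sharded_spec_unary` returns a list of spec dicts (as the lists-of-possible-inputs convention in `parameters` suggests) and that `check_with_pcc` returns a `(passed, message)` pair:

```python
import ttnn
from tests.sweep_framework.sweeps.eltwise.binary.logical_or import logical_or_sharded as sweep

device = ttnn.open_device(device_id=0)
try:
    spec = sweep.parameters["nightly"]["input_spec"][0]  # assumption: list of spec dicts
    (passed, message), e2e_perf = sweep.run(
        spec,
        input_a_dtype=ttnn.bfloat16,
        input_b_dtype=ttnn.bfloat16,
        output_dtype=ttnn.bfloat16,
        device=device,
    )
    print(passed, message, e2e_perf)
finally:
    ttnn.close_device(device)
```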
### tests/sweep_framework/sweeps/eltwise/binary/logical_xor/logical_xor_sharded.py (120 additions, 0 deletions)
```python
# SPDX-FileCopyrightText: © 2024 Tenstorrent Inc.

# SPDX-License-Identifier: Apache-2.0

from typing import Optional, Tuple
from functools import partial

import math
import torch
import random
import ttnn
from tests.sweep_framework.sweep_utils.utils import gen_shapes, tensor_to_dtype, sanitize_shape_rm
from tests.sweep_framework.sweep_utils.sharding_utils import gen_sharded_spec_unary, parse_sharding_spec
from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt

from tests.ttnn.utils_for_testing import assert_equal, check_with_pcc, start_measuring_time, stop_measuring_time
from models.utility_functions import torch_random

# Override the default timeout in seconds for hang detection.
TIMEOUT = 30

random.seed(0)

# Parameters provided to the test vector generator are defined here.
# They are defined as dict-type suites that contain the arguments to the run function as keys, and lists of possible inputs as values.
# Each suite has a key name (in this case "nightly") which will associate the test vectors to this specific suite of inputs.
# Developers can create their own generator functions and pass them to the parameters as inputs.
parameters = {
    "nightly": {
        "input_spec": gen_sharded_spec_unary(8, layouts=["TILE_LAYOUT"]),
        "input_a_dtype": [ttnn.bfloat16, ttnn.bfloat8_b],
        "input_b_dtype": [ttnn.bfloat16, ttnn.bfloat8_b],
        "output_dtype": [ttnn.bfloat16, ttnn.bfloat8_b],
    },
}


# invalidate_vector is called during the generation phase, where each vector will be passed in.
# If invalidated, the vector will still be stored but will be skipped.
# Returns (False, None) if the vector is valid, and (True, str) with a reason for invalidation if it is invalid.
def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]:
    input_shape, X, Y, sharding_strategy, _, _, input_layout = test_vector["input_spec"].values()
    pre_sharded_height = math.prod(input_shape[:-1])
    pre_sharded_width = input_shape[-1]

    if input_layout == "ROW_MAJOR_LAYOUT":
        return True, "Input to eltwise binary must be tilized"

    if input_layout == "ROW_MAJOR_LAYOUT" and test_vector["input_a_dtype"] == ttnn.bfloat8_b:
        return True, "bfloat8_b is only supported on tiled layout"

    return False, None


# These are the run instructions for the test, defined by the developer.
# The run function must take the above-defined parameters as inputs.
# The runner will call this run function with each test vector, and the returned results from this function will be stored.
# If you defined a device_mesh_fixture above, the object you yielded will be passed into this function as 'device'. Otherwise, it will be the default ttnn device opened by the infra.
def run(
    input_spec,
    input_a_dtype,
    input_b_dtype,
    output_dtype,
    *,
    device,
) -> list:
    data_seed = random.randint(0, 20000000)
    torch.manual_seed(data_seed)

    (
        input_shape,
        core_grid,
        sharding_strategy,
        shard_orientation,
        tensor_hw_as_shard_shape,
        input_layout,
    ) = parse_sharding_spec(input_spec)

    if input_layout == ttnn.ROW_MAJOR_LAYOUT:
        input_shape = sanitize_shape_rm(input_shape)

    torch_input_tensor_a = gen_func_with_cast_tt(
        partial(torch_random, low=-100, high=100, dtype=torch.float32), input_a_dtype
    )(input_shape)
    torch_input_tensor_b = gen_func_with_cast_tt(
        partial(torch_random, low=-100, high=100, dtype=torch.float32), input_b_dtype
    )(input_shape)

    torch_output_tensor = tensor_to_dtype(torch.logical_xor(torch_input_tensor_a, torch_input_tensor_b), output_dtype)

    sharded_config = ttnn.create_sharded_memory_config_(
        shape=input_shape,
        core_grid=core_grid,
        strategy=sharding_strategy,
        orientation=shard_orientation,
        use_height_and_width_as_shard_shape=tensor_hw_as_shard_shape,
    )

    input_tensor_a = ttnn.from_torch(
        torch_input_tensor_a,
        dtype=input_a_dtype,
        layout=input_layout,
        device=device,
        memory_config=sharded_config,
    )
    input_tensor_b = ttnn.from_torch(
        torch_input_tensor_b,
        dtype=input_b_dtype,
        layout=input_layout,
        device=device,
        memory_config=sharded_config,
    )
    start_time = start_measuring_time()
    output_tensor = ttnn.logical_xor(input_tensor_a, input_tensor_b, memory_config=sharded_config, dtype=output_dtype)
    output_tensor = ttnn.to_torch(output_tensor)
    e2e_perf = stop_measuring_time(start_time)

    # pcc = assert_equal(torch_output_tensor, output_tensor)
    pcc = check_with_pcc(torch_output_tensor, output_tensor, 0.999)
    return [pcc, e2e_perf]
```
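For reference, the golden output in these sweeps comes from the torch logical ops on the float inputs, where any nonzero value counts as True, before `tensor_to_dtype` casts the boolean result to the requested output dtype:

```python
import torch

# torch.logical_xor treats any nonzero float as True.
a = torch.tensor([-3.5, 0.0, 2.0, 0.0])
b = torch.tensor([1.0, 0.0, 7.0, -0.5])
print(torch.logical_xor(a, b))  # tensor([False, False, False,  True])
```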