From 85c5ff4a516a081a8eaf74622e2b50d16cb2449a Mon Sep 17 00:00:00 2001
From: mcw-anasuya
Date: Thu, 30 Jan 2025 13:12:32 +0000
Subject: [PATCH] #17013: Update add_bw doc

---
 .../eltwise/binary_backward/binary_backward_pybind.hpp | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/ttnn/cpp/ttnn/operations/eltwise/binary_backward/binary_backward_pybind.hpp b/ttnn/cpp/ttnn/operations/eltwise/binary_backward/binary_backward_pybind.hpp
index 5a5aa1255983..a66e67f10fd9 100644
--- a/ttnn/cpp/ttnn/operations/eltwise/binary_backward/binary_backward_pybind.hpp
+++ b/ttnn/cpp/ttnn/operations/eltwise/binary_backward/binary_backward_pybind.hpp
@@ -747,6 +747,8 @@ void bind_binary_bw(
 
             bfloat8_b/bfloat4_b is only supported on TILE_LAYOUT
 
+        {4}
+
         Example:
             >>> grad_tensor = ttnn.from_torch(torch.tensor([[1, 2], [3, 4]], dtype=torch.bfloat16), layout=ttnn.TILE_LAYOUT, device=device)
             >>> tensor1 = ttnn.from_torch(torch.tensor([[1, 2], [3, 4]], dtype=torch.bfloat16, requires_grad=True), layout=ttnn.TILE_LAYOUT, device=device)
@@ -1183,7 +1185,8 @@ void py_module(py::module& module) {
         module,
         ttnn::add_bw,
         R"doc(Performs backward operations for add of :attr:`input_tensor_a` and :attr:`input_tensor_b` or :attr:`scalar` with given :attr:`grad_tensor`.)doc",
-        R"doc(BFLOAT16, BFLOAT8_B)doc");
+        R"doc(BFLOAT16, BFLOAT8_B)doc",
+        R"doc(Tensors of type `ttnn.Tensor` do not support sharded memory configuration.)doc");
 
     detail::bind_binary_bw(
         module,
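
For reference, a minimal end-to-end sketch of the usage documented by this patch, with the new note in mind: all tensors use the default interleaved memory configuration rather than a sharded one. The tensor values mirror the docstring example; the device setup via ttnn.open_device / ttnn.close_device is the standard ttnn pattern and is illustrative, not part of the patch.

    import torch
    import ttnn

    # Standard ttnn device setup (illustrative; device_id is an assumption).
    device = ttnn.open_device(device_id=0)

    # Tensors are created with the default interleaved memory configuration;
    # per the doc note added above, sharded memory configurations are not
    # supported for this op.
    grad_tensor = ttnn.from_torch(
        torch.tensor([[1, 2], [3, 4]], dtype=torch.bfloat16),
        layout=ttnn.TILE_LAYOUT,
        device=device,
    )
    tensor1 = ttnn.from_torch(
        torch.tensor([[1, 2], [3, 4]], dtype=torch.bfloat16, requires_grad=True),
        layout=ttnn.TILE_LAYOUT,
        device=device,
    )
    tensor2 = ttnn.from_torch(
        torch.tensor([[1, 2], [3, 4]], dtype=torch.bfloat16, requires_grad=True),
        layout=ttnn.TILE_LAYOUT,
        device=device,
    )

    # Backward of add: returns the gradients with respect to both inputs.
    output = ttnn.add_bw(grad_tensor, tensor1, tensor2)

    ttnn.close_device(device)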