Skip to content

Commit

Permalink
[GPU] Add intermediate weight reorder including siblings without impl yet
Browse files Browse the repository at this point in the history
  • Loading branch information
kelvinchoi-intel committed Feb 4, 2025
1 parent 3571d18 commit 7df9c21
Show file tree
Hide file tree
Showing 2 changed files with 63 additions and 0 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -129,16 +129,31 @@ void post_optimize_weights::optimize_weights(T& node, program& p) {
// Refresh the reorder node's output layout after insertion.
auto& weights_reorder_node = node.get_dependency(i);
weights_reorder_node.get_output_layout(false);
} else {
// Snapshot prev_node's users BEFORE inserting the new reorder —
// add_intermediate() below mutates the user list, so iterate a copy.
auto siblings = prev_node.get_users();
auto weights_reorder = _rf.get_weights_reorder(prev_node.id(), weights_reorder_params);
// insert new weights reorder node to topology
p.add_intermediate(weights_reorder.first, node, i, !weights_reorder.second);
// set weights reorder's node output layout and implementation
auto& weights_reorder_node = node.get_dependency(i);
weights_reorder_node.get_output_layout(false);

// apply to other siblings
// Any other user of the same constant weights that has no selected
// implementation yet also needs its own copy of the weights reorder.
for (auto sib : siblings) {
auto sib_impl = sib->get_selected_impl();
// Skip the current node, the reorder we just inserted, and any
// sibling that already has an implementation selected.
if (sib->id().compare(node.id()) != 0 && sib->id().compare(weights_reorder.first->id) != 0 && !sib_impl) {
auto weights_reorder2 = _rf.get_weights_reorder(prev_node.id(), weights_reorder_params);
// NOTE(review): reuses dependency index `i` taken from `node` —
// assumes the weights input sits at the same index in every
// sibling; confirm this holds for all user primitives.
p.add_intermediate(weights_reorder2.first, *sib, i, !weights_reorder2.second);

// NOTE(review): output layout of the sibling's reorder node is
// intentionally not recalculated here (commented out below) —
// presumably deferred to a later pass; verify.
// auto& weights_reorder_node = node.get_dependency(i);
// weights_reorder_node.get_output_layout(false);
}
}

// Only select an implementation when the reorder was not folded away.
if (!weights_reorder.second) {
set_implementation(weights_reorder_node);
}


}
}
}
Expand Down
48 changes: 48 additions & 0 deletions src/plugins/intel_gpu/tests/unit/passes/post_optimize_weights.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,9 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "intel_gpu/primitives/permute.hpp"
#include "test_utils.h"
#include "random_generator.hpp"
#include "program_wrapper.h"
#include "fully_connected_inst.h"
#include "convolution_inst.h"
Expand Down Expand Up @@ -242,3 +244,49 @@ TEST(post_optimize_weights, onednn_group_conv_weights_reorder_test) {
ASSERT_TRUE(onednn_weights_params->_out_desc.get_size() == prog->get_node("weights_weights_reorder_0").get_output_layout().bytes_count());
#endif
}

// Verifies that when a constant-transpose (permute + reorder) feeding shared
// weights is folded, an intermediate weights reorder is inserted not only for
// the primary FC but also for sibling users that have no implementation yet.
TEST(post_optimize_weights, fuse_constant_transposes_removal_and_add_intermediate_including_siblings) {
    auto& engine = get_test_engine();

    // input2 is dynamic in the batch dimension so fc2 stays shape-agnostic.
    auto input2_layout_dyn = layout{ ov::PartialShape{ -1, 32 }, data_types::f16, format::bfyx };

    auto input = engine.allocate_memory({ { 2, 32 }, data_types::f16, format::bfyx });
    auto input2 = engine.allocate_memory({ { 2, 32 }, data_types::f16, format::bfyx });
    auto weights = engine.allocate_memory({{ 32, 2 }, data_types::f32, format::bfyx });

    tests::random_generator rg(GET_SUITE_NAME);
    auto input_data = rg.generate_random_2d<ov::float16>(2, 32, -1, 1);
    // Fix: range was (-1, -1), an empty interval — use (-1, 1) like the others.
    auto input2_data = rg.generate_random_2d<ov::float16>(2, 32, -1, 1);
    auto weights_data = rg.generate_random_2d<float>(32, 2, -1, 1);

    set_values(input, flatten_2d(format::bfyx, input_data));
    // Flatten for consistency with the other set_values calls above/below.
    set_values(input2, flatten_2d(format::bfyx, input2_data));
    set_values(weights, flatten_2d(format::bfyx, weights_data));

    // Both fully_connected nodes share the transposed/reordered weights so the
    // pass must handle the sibling (fc2) when optimizing fc1's weights.
    topology topology(
        input_layout("input", input->get_layout()),
        input_layout("input2", input2_layout_dyn),
        data("weights", weights),
        permute("permute_test", input_info("weights"), {1, 0}),
        reorder("reorder_dt", input_info("permute_test"), format::any, data_types::f16, std::vector<float>()),
        fully_connected("fc1", input_info("input"), { "reorder_dt" }, "", data_types::f16),
        fully_connected("fc2", input_info("input2"), { "reorder_dt" }, "", data_types::f16)
    );

    ExecutionConfig config = get_test_default_config(engine);
    config.set_property(ov::intel_gpu::optimize_data(true));
    config.set_property(ov::intel_gpu::allow_new_shape_infer(true));

    // On iGPU/dGPU with immad support, force fc1 onto oneDNN to exercise the
    // onednn weights-reorder path.
    if (engine.get_device_info().supports_immad) {
        ov::intel_gpu::ImplementationDesc fc_impl = { format::bfyx, "", impl_types::onednn };
        config.set_property(ov::intel_gpu::force_implementations(ov::intel_gpu::ImplForcingMap{ {"fc1", fc_impl} }));
    }

    cldnn::network network(engine, topology, config);
    network.set_input_data("input", input);
    network.set_input_data("input2", input2);

    // Executing the network is the check: a missing sibling reorder would
    // fail at graph compilation/execution time.
    auto outputs = network.execute();
    auto output = outputs.at("fc1").get_memory();
    cldnn::mem_lock<ov::float16> output_ptr(output, get_test_stream());
}

0 comments on commit 7df9c21

Please sign in to comment.