Skip to content

Commit

Permalink
TF2-C-API: find input and output names using MetaGraphDef (#424)
Browse files Browse the repository at this point in the history
* TF2-C-API: find input and output names using MetaGraphDef

* address PR comments.
  • Loading branch information
Jian Sheng authored Apr 12, 2022
1 parent 9af317c commit 43403d0
Show file tree
Hide file tree
Showing 5 changed files with 101 additions and 107 deletions.
9 changes: 6 additions & 3 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -214,7 +214,7 @@ if(USE_TENSORRT)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(TENSORRT DEFAULT_MSG TENSORRT_INCLUDE_DIR TENSORRT_LIB_DIR)
if(NOT TENSORRT_FOUND)
message(ERROR "Could not find TensorRT.")
message(FATAL_ERROR "Could not find TensorRT.")
endif()
message(STATUS "TENSORRT_LIB_DIR: " ${TENSORRT_LIB_DIR})
include_directories(${TENSORRT_INCLUDE_DIR})
Expand All @@ -232,9 +232,12 @@ if(USE_TENSORRT)
endif()
if(WITH_TENSORFLOW2_LIB)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDLR_TENSORFLOW2")
include_directories("${WITH_TENSORFLOW2_LIB}/include")
if(NOT TENSORFLOW2_INCLUDE)
message(FATAL_ERROR "TENSORFLOW2_INCLUDE is required.")
endif()
include_directories("${TENSORFLOW2_INCLUDE}" "${TENSORFLOW2_INCLUDE}/src")
list(APPEND DLR_SRC "src/dlr_tensorflow2/dlr_tensorflow2.cc")
list(APPEND DLR_LINKER_LIBS -L${WITH_TENSORFLOW2_LIB}/lib -ltensorflow)
list(APPEND DLR_LINKER_LIBS -L${WITH_TENSORFLOW2_LIB} -ltensorflow -ltensorflow_framework)
endif()
if(WITH_HEXAGON)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDLR_HEXAGON")
Expand Down
23 changes: 21 additions & 2 deletions doc/install.rst
Original file line number Diff line number Diff line change
Expand Up @@ -267,16 +267,35 @@ We can use DLR to run Tensorflow 2.x saved models (including `TensorRT converted

TensorFlow C library can be downloaded from `tensorflow.org <https://www.tensorflow.org/install/lang_c>`_ or built `from source <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/lib_package/README.md>`_.

The TensorFlow C/C++ headers can be obtained by building the following TensorFlow target.

.. code-block:: bash
bazel build --config=opt tensorflow:install_headers
# The headers will be saved to bazel-bin/tensorflow/include
Another way to get the TensorFlow C/C++ headers is to install the TensorFlow Python package using pip. The headers can then be found inside the installation folder, e.g.

.. code-block:: bash
sudo pip3 install tensorflow==<tf_version>
# The headers can be found in /usr/local/lib/python3.x/dist-packages/tensorflow/include
To build DLR with TensorFlow C library:

.. code-block:: bash
# Build DLR with libtensorflow
cmake .. -DWITH_TENSORFLOW2_LIB=<path to libtensorflow folder>
cmake .. \
-DWITH_TENSORFLOW2_LIB=<path to libtensorflow lib folder> \
-DTENSORFLOW2_INCLUDE=<tensorflow include folder>
make -j8
# Test DLR with libtensorflow
export LD_LIBRARY_PATH=<path to libtensorflow folder>/lib
export LD_LIBRARY_PATH=<path to libtensorflow lib folder>
./dlr_tensorflow2_test
Expand Down
12 changes: 8 additions & 4 deletions include/dlr_tensorflow2/dlr_tensorflow2.h
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
#include "dlr.h"
#include "dlr_common.h"
#include "tensorflow/c/c_api.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"

namespace dlr {

Expand All @@ -14,22 +15,23 @@ void PrepareTF2ConfigProto(const DLR_TF2Config& tf2_config, std::vector<std::uin
/*! \brief class Tensorflow2Model
*/
class Tensorflow2Model : public DLRModel {
typedef google::protobuf::Map<std::string, tensorflow::TensorInfo> InputOutputType;

private:
TF_Status* status_;
TF_Graph* graph_;
TF_Session* sess_;
std::vector<std::vector<int64_t>> graph_input_shapes_; // might have -1 dimensions
std::vector<std::string> output_names_;
std::set<std::string> ignored_names_;
std::vector<std::string> output_types_;
std::vector<std::string> input_tensor_names_;
std::vector<std::string> output_tensor_names_;
std::vector<TF_Output> inputs_;
std::vector<TF_Output> outputs_;
std::vector<TF_Tensor*> input_tensors_;
std::vector<TF_Tensor*> output_tensors_;
TF_Output ParseTensorName(const std::string& t_name);
void DetectInputs();
void DetectOutputs();
void DetectInputShapes();
void DetectInputsAndOutputs(const InputOutputType& inputs, const InputOutputType& outputs);
void PrepInputs();
void PrepOutputs();
int GetInputId(const char* name);
Expand All @@ -54,6 +56,8 @@ class Tensorflow2Model : public DLRModel {
int dim) override;
virtual void Run() override;
virtual void GetOutput(int index, void* out) override;
virtual int GetOutputIndex(const char* name) const override;
virtual void GetOutputByName(const char* name, void* out) override;
virtual const void* GetOutputPtr(int index) const override;
virtual const char* GetOutputName(const int index) const override;
virtual void GetOutputShape(int index, int64_t* shape) const override;
Expand Down
145 changes: 49 additions & 96 deletions src/dlr_tensorflow2/dlr_tensorflow2.cc
Original file line number Diff line number Diff line change
Expand Up @@ -65,58 +65,39 @@ TF_Output Tensorflow2Model::ParseTensorName(const std::string& t_name) {
return oper_out;
}

void Tensorflow2Model::DetectInputs() {
size_t pos = 0;
TF_Operation* op;
while ((op = TF_GraphNextOperation(graph_, &pos)) != nullptr) {
const std::string op_type = TF_OperationOpType(op);
const int n_in = TF_OperationNumInputs(op);
const int n_out = TF_OperationNumOutputs(op);
const std::string op_name = TF_OperationName(op);
if (op_type == "Placeholder" && n_in == 0 && n_out == 1 && ignored_names_.count(op_name) == 0) {
input_names_.push_back(op_name + ":0");
void Tensorflow2Model::DetectInputsAndOutputs(const InputOutputType& inputs,
const InputOutputType& outputs) {
for (auto& el : inputs) {
const tensorflow::TensorInfo& ti = el.second;
input_names_.push_back(el.first);
input_tensor_names_.push_back(ti.name());
const tensorflow::TensorShapeProto& shape = ti.tensor_shape();
int dim_size = shape.dim_size();
std::vector<int64_t> dims;
for (int i = 0; i < dim_size; i++) {
const tensorflow::TensorShapeProto_Dim& dim = shape.dim(i);
int64_t dim_sz = dim.size();
dims.push_back(dim_sz);
}
graph_input_shapes_.push_back(dims);
}
for (auto& el : outputs) {
const tensorflow::TensorInfo& ti = el.second;
output_names_.push_back(el.first);
output_tensor_names_.push_back(ti.name());
}
num_inputs_ = input_names_.size();
std::string msg = "Found " + std::to_string(num_inputs_) + " possible inputs: ";
num_outputs_ = output_names_.size();
input_shapes_.resize(num_inputs_);
std::string msg = "Found " + std::to_string(num_inputs_) + " inputs: ";
for (int i = 0; i < num_inputs_; i++) {
if (i > 0) {
msg += ", ";
}
msg += input_names_[i];
}
LOG(INFO) << msg;
}

void Tensorflow2Model::DetectOutputs() {
size_t pos = 0;
TF_Operation* op;
// while loop
while ((op = TF_GraphNextOperation(graph_, &pos)) != nullptr) {
const std::string op_type = TF_OperationOpType(op);
const int n_out = TF_OperationNumOutputs(op);
const int n_cout = TF_OperationNumControlOutputs(op);
const std::string op_name = TF_OperationName(op);
if (op_type != "Const" && op_type != "Assign" && op_type != "NoOp" &&
op_type != "Placeholder" && n_cout == 0 && ignored_names_.count(op_name) == 0) {
int n_consumers = 0;
for (int i = 0; i < n_out; i++) {
const TF_Output tf_out = {op, i};
n_consumers += TF_OperationOutputNumConsumers(tf_out);
if (n_consumers != 0) {
break;
}
}
if (n_consumers != 0) {
continue; // while loop
}
for (int i = 0; i < n_out; i++) {
output_names_.push_back(op_name + ":" + std::to_string(i));
}
}
}
num_outputs_ = output_names_.size();
std::string msg = "Found " + std::to_string(num_outputs_) + " possible outputs: ";
msg = "Found " + std::to_string(num_outputs_) + " outputs: ";
for (int i = 0; i < num_outputs_; i++) {
if (i > 0) {
msg += ", ";
Expand All @@ -126,27 +107,6 @@ void Tensorflow2Model::DetectOutputs() {
LOG(INFO) << msg;
}

void Tensorflow2Model::DetectInputShapes() {
for (int i = 0; i < num_inputs_; i++) {
const std::string& t_name = input_names_[i];
const TF_Output oper_out = ParseTensorName(t_name);

int n_dim = TF_GraphGetTensorNumDims(graph_, oper_out, status_);
if (TF_GetCode(status_) != TF_OK) {
LOG(FATAL) << "ERROR: TF_GraphGetTensorNumDims failed " << TF_Message(status_);
return; // unreachable
}
int64_t dims[n_dim];
TF_GraphGetTensorShape(graph_, oper_out, dims, n_dim, status_);
if (TF_GetCode(status_) != TF_OK) {
LOG(FATAL) << "ERROR: TF_GraphGetTensorShape failed " << TF_Message(status_);
return; // unreachable
}
graph_input_shapes_.push_back(std::vector<int64_t>(dims, dims + n_dim));
}
input_shapes_.resize(num_inputs_);
}

TF_Tensor* Tensorflow2Model::AllocateInputTensor(int index, const int64_t* dims, const int n_dim) {
const TF_Output oper_out = inputs_[index];
size_t num_elements = 1;
Expand All @@ -165,7 +125,7 @@ TF_Tensor* Tensorflow2Model::AllocateInputTensor(int index, const int64_t* dims,
}

void Tensorflow2Model::PrepInputs() {
for (std::string& t_name : input_names_) {
for (std::string& t_name : input_tensor_names_) {
TF_Output oper_out = ParseTensorName(t_name);
const TF_DataType t_type = TF_OperationOutputType(oper_out);
input_types_.push_back(std::to_string((int)t_type));
Expand All @@ -176,7 +136,7 @@ void Tensorflow2Model::PrepInputs() {
}

void Tensorflow2Model::PrepOutputs() {
for (std::string& t_name : output_names_) {
for (std::string& t_name : output_tensor_names_) {
TF_Output oper_out = ParseTensorName(t_name);
const TF_DataType t_type = TF_OperationOutputType(oper_out);
output_types_.push_back(std::to_string((int)t_type));
Expand Down Expand Up @@ -216,45 +176,23 @@ Tensorflow2Model::Tensorflow2Model(const std::string& model_path, const DLDevice
}
}
TF_Buffer* run_opts = nullptr;
TF_Buffer* meta_graph_def = nullptr;
TF_Buffer* meta_graph = TF_NewBuffer();
const char* tags = "serve";
int ntags = 1;
sess_ = TF_LoadSessionFromSavedModel(sess_opts, run_opts, model_path.c_str(), &tags, ntags,
graph_, meta_graph_def, status_);
graph_, meta_graph, status_);
if (TF_GetCode(status_) != TF_OK) {
LOG(FATAL) << "ERROR: Unable to create Session " << TF_Message(status_);
return; // unreachable
}
tensorflow::MetaGraphDef metagraph_def;
metagraph_def.ParseFromArray(meta_graph->data, meta_graph->length);
TF_DeleteBuffer(meta_graph);
const tensorflow::SignatureDef& serving_default_def =
metagraph_def.signature_def().at("serving_default");

DetectInputsAndOutputs(serving_default_def.inputs(), serving_default_def.outputs());

auto metadata = GetMetadataFile(model_path);
if (!metadata.empty() && !IsFileEmpty(metadata)) {
LOG(INFO) << "Loading metadata file: " << metadata;
LoadJsonFromFile(metadata, this->metadata_);
LOG(INFO) << "Input and Output names from metadata file";
LOG(INFO) << "Input Names:";
for (auto& el : this->metadata_.at("Model").at("Inputs")) {
input_names_.push_back(el.at("name"));
LOG(INFO) << el.at("name");
}
LOG(INFO) << "Output Names:";
for (auto& el : this->metadata_.at("Model").at("Outputs")) {
output_names_.push_back(el.at("name"));
LOG(INFO) << el.at("name");
}
num_inputs_ = input_names_.size();
num_outputs_ = output_names_.size();
} else {
ignored_names_ = {
"saver_filename", // name of the checkpoint
"StatefulPartitionedCall_1", // the loss
"StatefulPartitionedCall_2" // save operation
};
LOG(WARNING) << "Metadata file was not found. Auto-detecting Input and Output names. This may "
"not work correctly for some models...";
DetectInputs();
DetectOutputs();
}
DetectInputShapes();
PrepInputs();
PrepOutputs();

Expand Down Expand Up @@ -378,6 +316,21 @@ void Tensorflow2Model::GetOutput(int index, void* output) {
std::memcpy(output, out_t_data, num_bytes);
}

/*! \brief Map a signature-def output name (e.g. "logits") to its positional
 *  index in output_names_.
 *  \param name Null-terminated output name to look up.
 *  \return Zero-based index of the output.
 *  Logs FATAL (aborts) if the name is not a known output.
 */
int Tensorflow2Model::GetOutputIndex(const char* name) const {
  // Use size_t for the counter to avoid a signed/unsigned comparison
  // against std::vector::size().
  for (size_t i = 0; i < output_names_.size(); i++) {
    if (output_names_[i] == name) {
      return static_cast<int>(i);
    }
  }
  LOG(FATAL) << "Output Tensor not found, name: " << name;
  return -1;  // unreachable
}

/*! \brief Copy the named output tensor's data into a caller-provided buffer.
 *  \param name Output name as listed in the model's serving signature.
 *  \param out  Destination buffer, sized by the caller to hold the output.
 *  Resolves the name to a positional index, then delegates to the
 *  index-based GetOutput().
 */
void Tensorflow2Model::GetOutputByName(const char* name, void* out) {
  GetOutput(GetOutputIndex(name), out);
}

const void* Tensorflow2Model::GetOutputPtr(int index) const {
CHECK_LT(index, num_outputs_) << "Output index is out of range.";
const TF_Tensor* tensor = output_tensors_[index];
Expand Down
19 changes: 17 additions & 2 deletions tests/cpp/dlr_tensorflow2/dlr_tensorflow2_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -93,15 +93,23 @@ void CheckAllDLRMethods(DLRModelHandle& handle, const int batch_size, const int
FAIL() << "GetDLRInputName failed";
}
LOG(INFO) << "DLRInputName: " << input_name;
EXPECT_STREQ("serving_default_inputs:0", input_name);
EXPECT_STREQ("inputs", input_name);

// GetDLROutputName
const char* output_name;
if (GetDLROutputName(&handle, 0, &output_name)) {
FAIL() << "GetDLROutputName failed";
}
LOG(INFO) << "DLROutputName: " << output_name;
EXPECT_STREQ("StatefulPartitionedCall:0", output_name);
EXPECT_STREQ("logits", output_name);

// GetDLROutputIndex
int index = -1;
if (GetDLROutputIndex(&handle, output_name, &index)) {
FAIL() << "GetDLROutputIndex failed";
}
LOG(INFO) << "GetDLROutputIndex: " << index;
EXPECT_EQ(0, index);

// GetDLRInputType
const char* input_type;
Expand Down Expand Up @@ -198,6 +206,13 @@ void CheckAllDLRMethods(DLRModelHandle& handle, const int batch_size, const int
// Imagenet 281 - tabby, tabby cat
EXPECT_GE(output[out_offset + 281], 0.3f);
}

// GetDLROutputByName
std::vector<float> output_1(out_size);
if (GetDLROutputByName(&handle, "logits", output_1.data())) {
FAIL() << "GetDLROutputByName failed";
}
EXPECT_TRUE(std::equal(output_1.begin(), output_1.end(), output.begin()));
}

TEST(Tensorflow, CreateDLRModelFromTensorflow2) {
Expand Down

0 comments on commit 43403d0

Please sign in to comment.