Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Draft] Experimental Python API #14492

Draft
wants to merge 4 commits into
base: master
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
131 changes: 130 additions & 1 deletion docs/howto/how-to-use-nnfw-python-api.md
Original file line number Diff line number Diff line change
Expand Up @@ -41,8 +41,137 @@ outputs = session.inference()

## Run Inference with app on the target devices

reference app : [minimal-python app](https://github.com/Samsung/ONE/blob/master/runtime/onert/sample/minimal-python)
reference app : [minimal-python app](https://github.com/Samsung/ONE/blob/master/runtime/onert/sample/minimal-python/infer)

```
$ python3 minimal.py path_to_nnpackage_directory
```

## Experimental API

### Train with dataset

1. Import the Module and Initialize TrainSession

```python
import onert

# Create a training session and load the nnpackage
# The default backend is set to "train".
session = onert.experimental.train.session(nnpackage_path, backends="train")
```

2. Prepare Input and Output Data

```python
# Create a DataLoader

from onert.experimental.train import DataLoader

# Define the paths for input and expected output data
input_path = "path/to/input_data.npy"
expected_path = "path/to/expected_data.npy"

# Define batch size
batch_size = 16

# Initialize DataLoader
data_loader = DataLoader(input_dataset=input_path,
expected_dataset=expected_path,
batch_size=batch_size)
```

3. Compile the Session

```python
# Set Optimizer, Loss, and Metrics

from onert.experimental.train import optimizer, losses, metrics

# Define optimizer
optimizer_fn = optimizer.Adam(learning_rate=0.01)

# Define loss function
loss_fn = losses.CategoricalCrossentropy()

# Define metrics
metric_list = [metrics.CategoricalAccuracy()]

# Compile the training session
session.compile(optimizer=optimizer_fn, loss=loss_fn, metrics=metric_list, batch_size=batch_size)
```

4. Train the Model

```python
# Train and Validate

# Train the model
session.train(data_loader=data_loader,
epochs=5,
validation_split=0.2,
checkpoint_path="checkpoint.ckpt")
```

5. Train one step with data loader (Optional)

```python
for batch_idx, (inputs, expecteds) in enumerate(data_loader):
# Train on a single step
results = session.train_step(inputs, expecteds)
```

### Custom Metric

You can use custom metrics instead of the provided metrics.

```python
import numpy as np
from onert.experimental.train import metrics

class CustomMeanAbsoluteError(metrics.Metric):
"""
Custom metric to calculate the mean absolute error (MAE) between predictions and ground truth.
"""
def __init__(self):
self.total_absolute_error = 0.0
self.total_samples = 0

def update_state(self, outputs, expecteds):
"""
Update the metric's state based on the outputs and expected values.

Args:
outputs (list of np.ndarray): List of model outputs.
expecteds (list of np.ndarray): List of expected (ground truth) values.
"""
for output, expected in zip(outputs, expecteds):
self.total_absolute_error += np.sum(np.abs(output - expected))
self.total_samples += expected.size

def result(self):
"""
Calculate and return the current mean absolute error.

Returns:
float: The mean absolute error.
"""
return self.total_absolute_error / self.total_samples if self.total_samples > 0 else 0.0

def reset_state(self):
"""
Reset the metric's state for the next epoch.
"""
self.total_absolute_error = 0.0
self.total_samples = 0

# Add the custom metric to the list
metric_list = [
CustomMeanAbsoluteError()
]

# Compile the session with the custom metric
session.compile(optimizer=optimizer_fn, loss=loss_fn, metrics=metric_list, batch_size=batch_size)
```

### Run Train with dataset on the target devices
reference app : [minimal-python app](https://github.com/Samsung/ONE/blob/master/runtime/onert/sample/minimal-python/experimental/)
2 changes: 2 additions & 0 deletions infra/nnfw/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,8 @@ set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)

### CMAKE_BUILD_TYPE_LC: Build type lower case
string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LC)
# string(TOLOWER ${CMAKE_BUILD_TYPE} "debug")
set(CMAKE_CXX_FLAGS_DEBUG "-g")

set(NNAS_PROJECT_SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/../.." CACHE
INTERNAL "Where to find nnas top-level source directory"
Expand Down
27 changes: 19 additions & 8 deletions infra/nnfw/python/setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,12 +52,23 @@

# copy *py files to package_directory
PY_DIR = os.path.join(THIS_FILE_DIR, '../../../runtime/onert/api/python/package')
for py_file in os.listdir(PY_DIR):
if py_file.endswith(".py"):
src_path = os.path.join(PY_DIR, py_file)
dest_path = os.path.join(THIS_FILE_DIR, package_directory)
shutil.copy(src_path, dest_path)
print(f"Copied '{src_path}' to '{dest_path}'")
for root, dirs, files in os.walk(PY_DIR):
# Calculate the relative path from the source directory
rel_path = os.path.relpath(root, PY_DIR)
dest_dir = os.path.join(THIS_FILE_DIR, package_directory)
dest_sub_dir = os.path.join(dest_dir, rel_path)
print(f"dest_sub_dir '{dest_sub_dir}'")

# Ensure the corresponding destination subdirectory exists
os.makedirs(dest_sub_dir, exist_ok=True)

# Copy only .py files
for py_file in files:
if py_file.endswith(".py"):
src_path = os.path.join(root, py_file)
# dest_path = os.path.join(THIS_FILE_DIR, package_directory)
shutil.copy(src_path, dest_sub_dir)
print(f"Copied '{src_path}' to '{dest_sub_dir}'")

# remove architecture directory
if os.path.exists(package_directory):
Expand Down Expand Up @@ -136,12 +147,12 @@ def get_directories():
# copy .so files to architecture directories

setup(name=package_name,
version='0.1.0',
version='0.2.0',
description='onert API binding',
long_description='It provides onert Python api',
url='https://github.com/Samsung/ONE',
license='Apache-2.0, MIT, BSD-2-Clause, BSD-3-Clause, Mozilla Public License 2.0',
has_ext_modules=lambda: True,
packages=[package_directory],
packages=find_packages(),
package_data={package_directory: so_list},
install_requires=['numpy >= 1.19'])
76 changes: 76 additions & 0 deletions runtime/onert/api/python/include/nnfw_api_wrapper.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,13 +14,24 @@
* limitations under the License.
*/

#ifndef __ONERT_API_PYTHON_NNFW_API_WRAPPER_H__
#define __ONERT_API_PYTHON_NNFW_API_WRAPPER_H__

#include "nnfw.h"
#include "nnfw_experimental.h"

#include <pybind11/stl.h>
#include <pybind11/numpy.h>

namespace py = pybind11;

namespace onert
{
namespace api
{
namespace python
{

/**
* @brief tensor info describes the type and shape of tensors
*
Expand Down Expand Up @@ -120,6 +131,7 @@ class NNFW_SESSION

void close_session();
void set_input_tensorinfo(uint32_t index, const tensorinfo *tensor_info);
void prepare();
void run();
void run_async();
void wait();
Expand Down Expand Up @@ -159,4 +171,68 @@ class NNFW_SESSION
void set_output_layout(uint32_t index, const char *layout);
tensorinfo input_tensorinfo(uint32_t index);
tensorinfo output_tensorinfo(uint32_t index);

//////////////////////////////////////////////
// Experimental APIs for training
//////////////////////////////////////////////
nnfw_train_info train_get_traininfo();
void train_set_traininfo(const nnfw_train_info *info);

template <typename T> void train_set_input(uint32_t index, py::array_t<T> &buffer)
{
nnfw_tensorinfo tensor_info;
nnfw_input_tensorinfo(this->session, index, &tensor_info);

py::buffer_info buf_info = buffer.request();
const auto buf_shape = buf_info.shape;
assert(tensor_info.rank == static_cast<int32_t>(buf_shape.size()) && buf_shape.size() > 0);
tensor_info.dims[0] = static_cast<int32_t>(buf_shape.at(0));

ensure_status(nnfw_train_set_input(this->session, index, buffer.request().ptr, &tensor_info));
}
template <typename T> void train_set_expected(uint32_t index, py::array_t<T> &buffer)
{
nnfw_tensorinfo tensor_info;
nnfw_output_tensorinfo(this->session, index, &tensor_info);

py::buffer_info buf_info = buffer.request();
const auto buf_shape = buf_info.shape;
assert(tensor_info.rank == static_cast<int32_t>(buf_shape.size()) && buf_shape.size() > 0);
tensor_info.dims[0] = static_cast<int32_t>(buf_shape.at(0));

ensure_status(
nnfw_train_set_expected(this->session, index, buffer.request().ptr, &tensor_info));
}
template <typename T> void train_set_output(uint32_t index, py::array_t<T> &buffer)
{
nnfw_tensorinfo tensor_info;
nnfw_output_tensorinfo(this->session, index, &tensor_info);
NNFW_TYPE type = tensor_info.dtype;
uint32_t output_elements = num_elems(&tensor_info);
size_t length = sizeof(T) * output_elements;

ensure_status(nnfw_train_set_output(session, index, type, buffer.request().ptr, length));
}

void train_prepare();
void train(bool update_weights);
float train_get_loss(uint32_t index);

void train_export_circle(const py::str &path);
void train_import_checkpoint(const py::str &path);
void train_export_checkpoint(const py::str &path);

//////////////////////////////////////////////
// Optional APIs for training
//////////////////////////////////////////////
// nnfw_tensorinfo train_input_tensorinfo(uint32_t index);
// nnfw_tensorinfo train_expected_tensorinfo(uint32_t index);

// TODO Add other apis
};

} // namespace python
} // namespace api
} // namespace onert

#endif // __ONERT_API_PYTHON_NNFW_API_WRAPPER_H__
28 changes: 28 additions & 0 deletions runtime/onert/api/python/include/nnfw_session_bindings.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
/*
* Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef __ONERT_API_PYTHON_NNFW_SESSION_BINDINGS_H__
#define __ONERT_API_PYTHON_NNFW_SESSION_BINDINGS_H__

#include <pybind11/pybind11.h>

// Declare binding common functions
void bind_nnfw_session(pybind11::module_ &m);

// Declare binding experimental functions
void bind_experimental_nnfw_session(pybind11::module_ &m);

#endif // __ONERT_API_PYTHON_NNFW_SESSION_BINDINGS_H__
25 changes: 25 additions & 0 deletions runtime/onert/api/python/include/nnfw_tensorinfo_bindings.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
/*
* Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef __ONERT_API_PYTHON_NNFW_TENSORINFO_BINDINGS_H__
#define __ONERT_API_PYTHON_NNFW_TENSORINFO_BINDINGS_H__

#include <pybind11/pybind11.h>

// Declare binding tensorinfo
void bind_tensorinfo(pybind11::module_ &m);

#endif // __ONERT_API_PYTHON_NNFW_TENSORINFO_BINDINGS_H__
34 changes: 34 additions & 0 deletions runtime/onert/api/python/include/nnfw_traininfo_bindings.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
/*
* Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef __ONERT_API_PYTHON_NNFW_TRAININFO_BINDINGS_H__
#define __ONERT_API_PYTHON_NNFW_TRAININFO_BINDINGS_H__

#include <pybind11/pybind11.h>
#include <pybind11/stl.h>

namespace py = pybind11;

// Declare binding train enums
void bind_nnfw_train_enums(py::module_ &m);

// Declare binding loss info
void bind_nnfw_loss_info(py::module_ &m);

// Declare binding train info
void bind_nnfw_train_info(py::module_ &m);

#endif // __ONERT_API_PYTHON_NNFW_TRAININFO_BINDINGS_H__
Loading