[DO NOT LAND] Cherry-pick pip fixes from release/0.2 into main #3462

Closed · wants to merge 12 commits from release/0.2 into main
11 changes: 11 additions & 0 deletions .buckconfig
@@ -23,3 +23,14 @@

[parser]
target_platform_detector_spec = target:root//...->prelude//platforms:default target:shim//...->prelude//platforms:default

# Limit the number of files that the buck daemon needs to monitor. If every
# submodule is cloned recursively, some systems can fail to build with "OS file
# watch limit reached".
[project]
ignore = \
.git, \
**/.git, \
third-party/pytorch/third_party, \
cmake-out, \
pip-out
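
If a local build still hits the "OS file watch limit reached" error despite these ignores, the ceiling that usually matters is the Linux inotify watch count. A quick sketch for inspecting and raising it, assuming a Linux host and an inotify-based watcher:

```bash
# Check the current per-user inotify watch limit (Linux only).
cat /proc/sys/fs/inotify/max_user_watches

# Raise it for the running system; persist via /etc/sysctl.d/ if needed.
# The value below is an arbitrary example, not a project recommendation.
sudo sysctl -w fs.inotify.max_user_watches=524288
```
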
7 changes: 5 additions & 2 deletions .ci/scripts/utils.sh
@@ -18,8 +18,11 @@ retry () {
install_executorch() {
which pip
# Install executorch, this assumes that Executorch is checked out in the
# current directory
pip install . --no-build-isolation -v
# current directory. The --extra-index-url options tell pip to look on the
# pytorch servers for nightly and pre-release versions of torch packages.
pip install . --no-build-isolation -v \
--extra-index-url https://download.pytorch.org/whl/test/cpu \
--extra-index-url https://download.pytorch.org/whl/nightly/cpu
# Just print out the list of packages for debugging
pip list
}
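
The effect of those flags can be checked by hand. As a rough sketch (what pip actually resolves depends on pyproject.toml, so treat this as illustrative only), `--extra-index-url` adds the PyTorch indexes alongside PyPI rather than replacing it:

```bash
# Ask pip to consider pre-release torch builds from the same indexes the CI
# install uses; PyPI remains the default index.
pip install --pre torch \
  --extra-index-url https://download.pytorch.org/whl/test/cpu \
  --extra-index-url https://download.pytorch.org/whl/nightly/cpu
```
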
5 changes: 5 additions & 0 deletions .github/workflows/build-wheels-linux.yml
@@ -50,6 +50,11 @@ jobs:
test-infra-repository: pytorch/test-infra
test-infra-ref: main
build-matrix: ${{ needs.generate-matrix.outputs.matrix }}
# ExecuTorch only needs the first layer of submodules; override the
# "recursive" default to do less work, and to give the buck daemon fewer
# files to look at.
submodules: true
env-var-script: build/packaging/env_var_script_linux.sh
pre-script: ${{ matrix.pre-script }}
post-script: ${{ matrix.post-script }}
package-name: ${{ matrix.package-name }}
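
For local checkouts, the non-recursive behavior the comment above describes corresponds roughly to the following sketch (the workflow input itself is handled by pytorch/test-infra; the clone URL is assumed here):

```bash
# Clone ExecuTorch and initialize only the first layer of submodules,
# instead of the fully recursive default.
git clone https://github.com/pytorch/executorch.git
cd executorch
git submodule update --init   # note: no --recursive
```
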
6 changes: 6 additions & 0 deletions .github/workflows/build-wheels-m1.yml
@@ -50,6 +50,12 @@ jobs:
test-infra-repository: pytorch/test-infra
test-infra-ref: main
build-matrix: ${{ needs.generate-matrix.outputs.matrix }}
# ExecuTorch only needs the first layer of submodules; override the
# "recursive" default to do less work, and to give the buck daemon fewer
# files to look at.
submodules: true
delocate-wheel: false
env-var-script: build/packaging/env_var_script_m1.sh
pre-script: ${{ matrix.pre-script }}
post-script: ${{ matrix.post-script }}
package-name: ${{ matrix.package-name }}
29 changes: 26 additions & 3 deletions CMakeLists.txt
@@ -575,9 +575,14 @@ if(EXECUTORCH_BUILD_PYBIND)
list(APPEND _dep_libs custom_ops)
endif()
# compile options for pybind

set(_pybind_compile_options -Wno-deprecated-declarations -fPIC -frtti
-fexceptions)
set(_pybind_compile_options
-Wno-deprecated-declarations
-fPIC
-frtti
-fexceptions
# libtorch is built with the old ABI, so we need to do the same for any
# .cpp files that include torch, c10, or ATen targets.
-D_GLIBCXX_USE_CXX11_ABI=0)
# util lib
add_library(
util
@@ -599,6 +604,24 @@ if(EXECUTORCH_BUILD_PYBIND)
target_include_directories(portable_lib PRIVATE ${TORCH_INCLUDE_DIRS})
target_compile_options(portable_lib PUBLIC ${_pybind_compile_options})
target_link_libraries(portable_lib PUBLIC ${_dep_libs})
if(APPLE)
# pip wheels will need to be able to find the torch libraries. On Linux, the
# .so has non-absolute dependencies on libs like "libtorch.so" without
# paths; as long as we `import torch` first, those dependencies will work.
# But Apple dylibs do not support non-absolute dependencies, so we need to
# tell the loader where to look for its libraries. The LC_LOAD_DYLIB entries
# for the torch libraries will look like "@rpath/libtorch.dylib", so we can
# add an LC_RPATH entry to look in a directory relative to the installed
# location of our _portable_lib.so file. To see these LC_* values, run
# `otool -l _portable_lib*.so`.
set_target_properties(
portable_lib
PROPERTIES # Assume that this library will be installed in
# `site-packages/executorch/extension/pybindings`, and that
# the torch libs are in `site-packages/torch/lib`.
BUILD_RPATH "@loader_path/../../../torch/lib"
INSTALL_RPATH "@loader_path/../../../torch/lib")
endif()

install(TARGETS portable_lib
LIBRARY DESTINATION executorch/extension/pybindings)
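
To verify the load commands the comment above describes, something like this works on macOS after installing the wheel (the site-packages path is an assumption based on the install destination declared in this file):

```bash
# Inspect the LC_RPATH / LC_LOAD_DYLIB entries of the installed extension.
cd "$(python -c 'import site; print(site.getsitepackages()[0])')/executorch/extension/pybindings"
otool -l _portable_lib*.so | grep -A 2 -E 'LC_RPATH|LC_LOAD_DYLIB'
```
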
39 changes: 39 additions & 0 deletions README-wheel.md
@@ -0,0 +1,39 @@
**ExecuTorch** is a [PyTorch](https://pytorch.org/) platform that provides
infrastructure to run PyTorch programs everywhere from AR/VR wearables to
standard on-device iOS and Android mobile deployments. One of the main goals of
ExecuTorch is to enable wider customization and deployment capabilities for
PyTorch programs.

The `executorch` pip package is in alpha.
* Supported Python versions: 3.10, 3.11
* Compatible systems: Linux x86_64, macOS aarch64

The prebuilt `executorch.extension.pybindings.portable_lib` module included in
this package provides a way to run ExecuTorch `.pte` files, with some
restrictions:
* Only [core ATen
operators](https://pytorch.org/executorch/stable/ir-ops-set-definition.html)
are linked into the prebuilt module
* Only the [XNNPACK backend
delegate](https://pytorch.org/executorch/main/native-delegates-executorch-xnnpack-delegate.html)
is linked into the prebuilt module
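
A minimal sketch of running a previously exported `.pte` file with the prebuilt module, mirroring `build/packaging/smoke_test.py` from this PR (the underscore-prefixed helpers are internal details and the file name is a placeholder):

```bash
# Import the pybindings before torch, as the smoke test does, then load and
# run an exported program. Input shapes must match the exported model.
python -c "from executorch.extension.pybindings import portable_lib; \
import torch; \
m = portable_lib._load_for_executorch('model.pte'); \
print(m.forward((torch.ones(4),)))"
```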

Please visit the [ExecuTorch website](https://pytorch.org/executorch/) for
tutorials and documentation. Here are some starting points:
* [Getting
Started](https://pytorch.org/executorch/stable/getting-started-setup.html)
* Set up the ExecuTorch environment and run PyTorch models locally.
* [Working with
local LLMs](https://pytorch.org/executorch/stable/llm/getting-started.html)
* Learn how to use ExecuTorch to export and accelerate a large-language model
from scratch.
* [Exporting to
ExecuTorch](https://pytorch.org/executorch/main/tutorials/export-to-executorch-tutorial.html)
* Learn the fundamentals of exporting a PyTorch `nn.Module` to ExecuTorch, and
optimizing its performance using quantization and hardware delegation.
* Running LLaMA on
[iOS](https://pytorch.org/executorch/stable/llm/llama-demo-ios.html) and
[Android](https://pytorch.org/executorch/stable/llm/llama-demo-android.html)
devices.
* Build and run LLaMA in a demo mobile app, and learn how to integrate models
with your own apps.
6 changes: 6 additions & 0 deletions build/Utils.cmake
@@ -213,6 +213,12 @@ function(resolve_buck2)
PARENT_SCOPE)
endif()
endif()

# The buck2 daemon can get stuck. Killing it can help.
message(STATUS "Killing buck2 daemon")
execute_process(
COMMAND "${BUCK2}" kill
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
endfunction()

# Sets the value of the PYTHON_EXECUTABLE variable to 'python' if in an active
20 changes: 20 additions & 0 deletions build/packaging/env_var_script_linux.sh
@@ -0,0 +1,20 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# This file is sourced into the environment before building a pip wheel. It
# should typically only contain shell variable assignments. Be sure to export
# any variables so that subprocesses will see them.

# Enable pybindings so that users can execute ExecuTorch programs from python.
export EXECUTORCH_BUILD_PYBIND=1

# Ensure that CMAKE_ARGS is defined before referencing it. Defaults to empty
# if not defined.
export CMAKE_ARGS="${CMAKE_ARGS:-}"

# Link the XNNPACK backend into the pybindings runtime so that users can execute
# ExecuTorch programs that delegate to it.
CMAKE_ARGS="${CMAKE_ARGS} -DEXECUTORCH_BUILD_XNNPACK=ON"
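
For a rough local check of what this script exports (the real wheel builds consume it through the workflows' `env-var-script` input, so this is only a sketch):

```bash
# Source the script and confirm the variables the build will see.
source build/packaging/env_var_script_linux.sh
echo "EXECUTORCH_BUILD_PYBIND=${EXECUTORCH_BUILD_PYBIND}"
echo "CMAKE_ARGS=${CMAKE_ARGS}"   # should include -DEXECUTORCH_BUILD_XNNPACK=ON
```
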
26 changes: 26 additions & 0 deletions build/packaging/env_var_script_m1.sh
@@ -0,0 +1,26 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# This file is sourced into the environment before building a pip wheel. It
# should typically only contain shell variable assignments. Be sure to export
# any variables so that subprocesses will see them.

# Enable pybindings so that users can execute ExecuTorch programs from python.
export EXECUTORCH_BUILD_PYBIND=1

# Ensure that CMAKE_ARGS is defined before referencing it. Defaults to empty
# if not defined.
export CMAKE_ARGS="${CMAKE_ARGS:-}"

# Link the XNNPACK backend into the pybindings runtime so that users can execute
# ExecuTorch programs that delegate to it.
CMAKE_ARGS="${CMAKE_ARGS} -DEXECUTORCH_BUILD_XNNPACK=ON"

# When building for macOS, link additional backends into the pybindings runtime.

# TODO(dbort): Make these build properly in the CI environment.
# CMAKE_ARGS="${CMAKE_ARGS} -DEXECUTORCH_BUILD_COREML=ON"
# CMAKE_ARGS="${CMAKE_ARGS} -DEXECUTORCH_BUILD_MPS=ON"
19 changes: 17 additions & 2 deletions build/packaging/pre_build_script.sh
@@ -5,6 +5,21 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

set -eux
set -euxo pipefail

echo "This script is run before building ExecuTorch binaries"
# This script is run before building ExecuTorch binaries

# Manually install build requirements because `python setup.py bdist_wheel` does
# not install them. TODO(dbort): Switch to using `python -m build --wheel`,
# which does install them. Though we'd need to disable build isolation to be
# able to see the installed torch package.
readonly BUILD_DEPS=(
# This list must match the build-system.requires list from pyproject.toml.
"cmake"
"pyyaml"
"setuptools"
"tomli"
"wheel"
"zstd"
)
pip install --progress-bar off "${BUILD_DEPS[@]}"
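
For reference, the alternative mentioned in the TODO would look roughly like this (a sketch, not what the script currently does; isolation must stay off so the build can see the installed torch package):

```bash
# Let pypa/build install build-system.requires itself, with isolation off.
pip install build
python -m build --wheel --no-isolation
```
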
96 changes: 91 additions & 5 deletions build/packaging/smoke_test.py
@@ -5,13 +5,99 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

"""
This script is run by CI after building the executorch wheel. Before running
this, the job will install the matching torch package as well as the newly-built
executorch package and its dependencies.
"""

# Import this first. If it can't find the torch.so libraries, the dynamic load
# will fail and the process will exit.
from executorch.extension.pybindings import portable_lib # usort: skip

# Import this after importing the ExecuTorch pybindings. If the pybindings
# link against a different torch.so than this one uses, there will be a set of
# symbol conflicts; the process will either exit now, or there will be issues
# later in the smoke test.
import torch # usort: skip

# Import everything else later to help isolate the critical imports above.
import os
import tempfile
from typing import Tuple

from executorch.exir import to_edge
from torch.export import export


class LinearModel(torch.nn.Module):
    """Runs Linear on its input, which should have shape [4]."""

    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(4, 2)

    def forward(self, x: torch.Tensor):
        """Expects a single tensor of shape [4]."""
        return self.linear(x)


def linear_model_inputs() -> Tuple[torch.Tensor]:
    """Returns some example inputs compatible with LinearModel."""
    # The model takes a single tensor of shape [4] as an input.
    return (torch.ones(4),)


def export_linear_model() -> bytes:
    """Exports LinearModel and returns the .pte data."""

    # This helps the exporter understand the shapes of tensors used in the model.
    # Since our model only takes one input, this is a one-tuple.
    example_inputs = linear_model_inputs()

    # Export the pytorch model and process for ExecuTorch.
    print("Exporting program...")
    exported_program = export(LinearModel(), example_inputs)
    print("Lowering to edge...")
    edge_program = to_edge(exported_program)
    print("Creating ExecuTorch program...")
    et_program = edge_program.to_executorch()

    return et_program.buffer


def main():
    """
    Run ExecuTorch binary smoke tests. This is a placeholder for future tests. See
    https://github.com/pytorch/test-infra/wiki/Using-Nova-Reusable-Build-Workflows
    for more information about Nova binary workflow.
    """
    """Tests the export and execution of a simple model."""

    # If the pybindings loaded correctly, we should be able to ask for the set
    # of operators.
    ops = portable_lib._get_operator_names()
    assert len(ops) > 0, "Empty operator list"
    print(f"Found {len(ops)} operators; first element '{ops[0]}'")

    # Export LinearModel to .pte data.
    pte_data: bytes = export_linear_model()

    # Try saving to and loading from a file.
    with tempfile.TemporaryDirectory() as tempdir:
        pte_file = os.path.join(tempdir, "linear.pte")

        # Save the .pte data to a file.
        with open(pte_file, "wb") as file:
            file.write(pte_data)
        print(f"ExecuTorch program saved to {pte_file} ({len(pte_data)} bytes).")

        # Load the model from disk.
        m = portable_lib._load_for_executorch(pte_file)

        # Run the model.
        outputs = m.forward(linear_model_inputs())

        # Should see a single output with shape [2].
        assert len(outputs) == 1, f"Unexpected output length {len(outputs)}: {outputs}"
        assert outputs[0].shape == (2,), f"Unexpected output size {outputs[0].shape}"

    print("PASS")


if __name__ == "__main__":
4 changes: 3 additions & 1 deletion examples/models/llava_encoder/install_requirements.sh
@@ -5,6 +5,8 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

set -x

# install llava from the submodule
pip install --force-reinstall -e examples/third-party/LLaVA

@@ -19,4 +21,4 @@ pip install bitsandbytes -I
# For example, the torch version required by llava is older than the one
# ExecuTorch needs. To make both work, recover ExecuTorch's original
# dependencies by rerunning install_requirements.sh.
./install_requirements.sh
bash -x ./install_requirements.sh
5 changes: 4 additions & 1 deletion install_requirements.sh
@@ -105,9 +105,12 @@ $PIP_EXECUTABLE install --extra-index-url "${TORCH_NIGHTLY_URL}" \

#
# Install executorch pip package. This also makes `flatc` available on the path.
# The --extra-index-url may be necessary if pyproject.toml has a dependency on a
# pre-release or nightly version of a torch package.
#

EXECUTORCH_BUILD_PYBIND="${EXECUTORCH_BUILD_PYBIND}" \
CMAKE_ARGS="${CMAKE_ARGS}" \
CMAKE_BUILD_ARGS="${CMAKE_BUILD_ARGS}" \
$PIP_EXECUTABLE install . --no-build-isolation -v
$PIP_EXECUTABLE install . --no-build-isolation -v \
--extra-index-url "${TORCH_URL}"