2024-04-05 nightly release (e5a8de0)
pytorchbot committed Apr 5, 2024
1 parent ec77c33 commit c13e79a
Showing 25 changed files with 484 additions and 275 deletions.
11 changes: 11 additions & 0 deletions .github/workflows/apple.yml
@@ -100,6 +100,17 @@ jobs:
zip -r "${RUNNER_TEMP}/artifacts/${FRAMEWORK}-${VERSION}.zip" "${FRAMEWORK}.xcframework"
) done
# Build Debug iOS Frameworks
PYTHON_EXECUTABLE=python ${CONDA_RUN} --no-capture-output \
build/build_apple_frameworks.sh --coreml --custom --mps --optimized --portable --quantized --xnnpack --Debug
# Bundle Debug iOS Frameworks
for FRAMEWORK in "${FRAMEWORKS[@]}"; do (
cd cmake-out && \
mv "${FRAMEWORK}.xcframework" "${FRAMEWORK}_debug.xcframework" && \
zip -r "${RUNNER_TEMP}/artifacts/${FRAMEWORK}_debug-${VERSION}.zip" "${FRAMEWORK}_debug.xcframework"
) done
popd
upload-frameworks-ios:
Empty file. (×8)
5 changes: 5 additions & 0 deletions CMakeLists.txt
@@ -224,6 +224,11 @@ if(EXECUTORCH_BUILD_PTHREADPOOL)
set(PTHREADPOOL_ALLOW_DEPRECATED_API
ON
CACHE BOOL "")
if(APPLE)
set(PTHREADPOOL_SYNC_PRIMITIVE
"condvar"
CACHE STRING "")
endif()
add_subdirectory("${PTHREADPOOL_SOURCE_DIR}")
endif()

152 changes: 75 additions & 77 deletions Package.swift
@@ -9,95 +9,93 @@

import PackageDescription

let url = "https://ossci-ios.s3.amazonaws.com/executorch"
let version = "0.1.0"
let coreml_sha256 = "78d853d87be478696e56e658aa4ff17d47ae185a9a6a36316c821fa8b2d3aacd"
let custom_sha256 = "f059f6716298403dff89a952a70e323c54911be140d05f2467bd5cc61aaefae3"
let executorch_sha256 = "ba9a0c2b061afaedbc3c5454040a598b1371170bd9d9a30b7163c20e23339841"
let mps_sha256 = "39542a8671cca1aa627102aa47785d0f6e2dfe9a40e2c22288a755057b00fbfa"
let optimized_sha256 = "1d84fa16197bb6f0dec01aaa29d2a140c0e14d8e5e92630a7b4dd6f48012506d"
let portable_sha256 = "4993904f89ecb4476677ff3c072ed1a314a608170f10d364cfd23947851ccbf3"
let quantized_sha256 = "8d35ee0e7ca77c19782eaea07a1888f576cda679f8a4a5edb03d80ebe858047e"
let xnnpack_sha256 = "380e5185c4c48ede7cc0d0f0657ffb26df83cd9f55813d78593aea8a93942caf"

struct Framework {
let name: String
let checksum: String
var frameworks: [String] = []
var libraries: [String] = []

func target() -> Target {
.binaryTarget(
name: name,
url: "\(url)/\(name)-\(version).zip",
checksum: checksum
)
}

func dependencies() -> Target {
.target(
name: "\(name)_dependencies",
dependencies: [.target(name: name)],
path: ".swift/\(name)",
linkerSettings:
frameworks.map { .linkedFramework($0) } +
libraries.map { .linkedLibrary($0) }
)
}
}

let frameworks = [
Framework(
name: "coreml_backend",
checksum: coreml_sha256,
frameworks: [
let url = "https://ossci-ios.s3.amazonaws.com/executorch/"
let debug = "_debug"
let deliverables = [
"coreml_backend": [
"sha256": "0e5973bbc547e3a39f988f9a7a68b47bda0a6a17b04516fff6957fd527f8cd48",
"sha256" + debug: "c63773f0098625f884fecb11b4a5f6318b97d566329fef8b013444829cd7c421",
"frameworks": [
"Accelerate",
"CoreML",
],
libraries: [
"libraries": [
"sqlite3",
]
),
Framework(
name: "custom_backend",
checksum: custom_sha256
),
Framework(
name: "executorch",
checksum: executorch_sha256
),
Framework(
name: "mps_backend",
checksum: mps_sha256,
frameworks: [
],
],
"custom_backend": [
"sha256": "c8405e21324262cd6590046096ddeb3ac33a598f88afc817a2f2fdee821da150",
"sha256" + debug: "a08a6aa15ddce61a76cd1bf2206d017cc4ac7dcb9ca312ad7750a36814448eaa",
],
"executorch": [
"sha256": "57269f9b81d56a3d96ece2012e2ece3af24174846abd98de9a3bee07f3b9583d",
"sha256" + debug: "66975caf3d9c1238d29945288f23ddb6e07e16386d4dedf429c0f2d81cfbe0cc",
],
"mps_backend": [
"sha256": "bb7531172252b6535429fbde429de208665f933d0f509982872eada86839e734",
"sha256" + debug: "6d41437e40cb794b4b7a0d971931773de263370463b38a014f38e99bd1c5d52b",
"frameworks": [
"Metal",
"MetalPerformanceShaders",
"MetalPerformanceShadersGraph",
]
),
Framework(
name: "optimized_backend",
checksum: optimized_sha256
),
Framework(
name: "portable_backend",
checksum: portable_sha256
),
Framework(
name: "quantized_backend",
checksum: quantized_sha256
),
Framework(
name: "xnnpack_backend",
checksum: xnnpack_sha256
)
]
],
],
"optimized_backend": [
"sha256": "bdab593fb49c9000291dbf691ad578d771883745ed2851f00492e828d089d1ea",
"sha256" + debug: "8316ad259d6aafecf2e9abc91a04fc1fa3e0398597e043119b4c29c21e9f2029",
],
"portable_backend": [
"sha256": "38ebdad7d5cd24ca44cd950d561dcf9a9b883dff626c167bc6f5f28f041b8406",
"sha256" + debug: "9e68b3e92e5c920875845f59821ee984b87486d05c1bf8a461b011530e02dd55",
],
"quantized_backend": [
"sha256": "245a3acbf06c6afe9cfb6b03eddfa015390e582ffdfb76efd23b7c810f080f10",
"sha256" + debug: "134d759fe708a4ffbf7efbd25c6020186e1a13abc0dac0a897e2fe13aac3e76a",
],
"xnnpack_backend": [
"sha256": "a1c9cf8347c17f3e50e45d7f37f64ee040f0a1b0a40fa4748d90b45c4150e3b2",
"sha256" + debug: "e92a15c2982630951e5ae5e927d548049db25d89e8b639e8901c5f4650f3a7d0",
],
].reduce(into: [String: [String: Any]]()) {
$0[$1.key] = $1.value
$0[$1.key + debug] = $1.value
}
.reduce(into: [String: [String: Any]]()) {
var newValue = $1.value
if $1.key.hasSuffix(debug) {
$1.value.forEach { key, value in
if key.hasSuffix(debug) {
newValue[String(key.dropLast(debug.count))] = value
}
}
}
$0[$1.key] = newValue.filter { key, _ in !key.hasSuffix(debug) }
}

let package = Package(
name: "executorch",
platforms: [
.iOS(.v15),
],
products: frameworks.map { .library(name: $0.name, targets: ["\($0.name)_dependencies"]) },
targets: frameworks.flatMap { [$0.target(), $0.dependencies()] }
products: deliverables.keys.map { key in
.library(name: key, targets: ["\(key)_dependencies"])
}.sorted { $0.name < $1.name },
targets: deliverables.flatMap { key, value -> [Target] in
[
.binaryTarget(
name: key,
url: "\(url)\(key)-\(version).zip",
checksum: value["sha256"] as? String ?? ""
),
.target(
name: "\(key)_dependencies",
dependencies: [.target(name: key)],
path: ".swift/\(key)",
linkerSettings:
(value["frameworks"] as? [String] ?? []).map { .linkedFramework($0) } +
(value["libraries"] as? [String] ?? []).map { .linkedLibrary($0) }
),
]
}
)
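
For context, the restructured manifest above exposes one library product per deliverable, plus a matching `_debug` variant (e.g. `executorch_debug`) whose binary target resolves to the Debug zip produced by the workflow change in `apple.yml`. Below is a minimal consumer-side sketch; the repository URL/branch pinning, the package identity, and the app/target names are illustrative assumptions, not taken from this commit.

```swift
// swift-tools-version:5.9
// Hypothetical consumer Package.swift — a sketch only. The branch to pin and the
// package identity ("executorch") are assumptions; adjust to how the package is vended.
import PackageDescription

let package = Package(
    name: "MyApp",  // placeholder app package name
    platforms: [.iOS(.v15)],  // matches the platform declared by the manifest above
    dependencies: [
        .package(url: "https://github.com/pytorch/executorch.git", branch: "main")
    ],
    targets: [
        .target(
            name: "MyApp",
            dependencies: [
                // Release frameworks exposed as products by the manifest above:
                .product(name: "executorch", package: "executorch"),
                .product(name: "xnnpack_backend", package: "executorch"),
                // The Debug builds added by this change use a "_debug" suffix, e.g.:
                // .product(name: "executorch_debug", package: "executorch"),
            ]
        )
    ]
)
```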
5 changes: 5 additions & 0 deletions docs/source/getting-started-setup.md
@@ -117,6 +117,11 @@ Follow these steps:
./install_requirements.sh
```

To install with pybindings and dependencies for other backends, run the following (see the available options [here](https://github.com/pytorch/executorch/blob/main/install_requirements.sh#L26-L29)):
```bash
./install_requirements.sh --pybind <coreml | mps | xnnpack>
```

You have successfully set up your environment to work with ExecuTorch. The next
step is to generate a sample ExecuTorch program.

21 changes: 9 additions & 12 deletions examples/apple/coreml/scripts/export.py
@@ -19,9 +19,11 @@
from executorch.backends.apple.coreml.partition.coreml_partitioner import (
CoreMLPartitioner,
)
from executorch.exir import to_edge

from executorch.exir.backend.backend_api import to_backend
from executorch.sdk.etrecord import generate_etrecord
from torch.export import export

REPO_ROOT = pathlib.Path(__file__).resolve().parent.parent.parent.parent.parent
EXAMPLES_DIR = REPO_ROOT / "examples"
@@ -32,7 +34,6 @@

# Script to export a model with coreml delegation.

_CAPTURE_CONFIG = exir.CaptureConfig(enable_aot=True, _unlift=False)
_EDGE_COMPILE_CONFIG = exir.EdgeCompileConfig(
_check_ir_validity=False,
)
@@ -84,9 +85,7 @@ def partition_module_to_coreml(module):

def lower_module_to_coreml(module, compile_specs):
module = module.eval()
edge = exir.capture(module, example_inputs, _CAPTURE_CONFIG).to_edge(
_EDGE_COMPILE_CONFIG
)
edge = to_edge(export(module, example_inputs), compile_config=_EDGE_COMPILE_CONFIG)
# All of the subsequent calls on the edge_dialect_graph generated above (such as delegation or
# to_executorch()) are done in place and the graph is also modified in place. For debugging purposes
# we would like to keep a copy of the original edge dialect graph and hence we create a deepcopy of
@@ -95,7 +94,7 @@ def lower_module_to_coreml(module, compile_specs):

lowered_module = to_backend(
CoreMLBackend.__name__,
edge.exported_program,
edge.exported_program(),
compile_specs,
)

@@ -104,13 +103,11 @@ def export_lowered_module_to_executorch_program(lowered_module, example_inputs):

def export_lowered_module_to_executorch_program(lowered_module, example_inputs):
lowered_module(*example_inputs)
exec_prog = (
exir.capture(lowered_module, example_inputs, _CAPTURE_CONFIG)
.to_edge(_EDGE_COMPILE_CONFIG)
.to_executorch(
config=exir.ExecutorchBackendConfig(
extract_constant_segment=False, extract_delegate_segments=True
)
exec_prog = to_edge(
export(lowered_module, example_inputs), compile_config=_EDGE_COMPILE_CONFIG
).to_executorch(
config=exir.ExecutorchBackendConfig(
extract_constant_segment=False, extract_delegate_segments=True
)
)

MainActivity.java
@@ -30,19 +30,26 @@ public class MainActivity extends Activity implements Runnable, LlamaCallback {
private LlamaModule mModule = null;
private Message mResultMessage = null;

private int mNumTokens = 0;
private long mRunStartTime = 0;
private String mModelFilePath = "";
private String mTokenizerFilePath = "";

@Override
public void onResult(String result) {
System.out.println("onResult: " + result);
mResultMessage.appendText(result);
mNumTokens++;
run();
}

@Override
public void onStats(float tps) {
runOnUiThread(
() -> {
if (mResultMessage != null) {
mResultMessage.setTokensPerSecond(tps);
mMessageAdapter.notifyDataSetChanged();
}
});
}

private static String[] listLocalFile(String path, String suffix) {
File directory = new File(path);
if (directory.exists() && directory.isDirectory()) {
@@ -79,14 +86,14 @@ private void setLocalModel(String modelPath, String tokenizerPath) {
});
}

long runDuration = System.currentTimeMillis() - runStartTime;
long loadDuration = System.currentTimeMillis() - runStartTime;
String modelInfo =
"Model path: "
+ modelPath
+ "\nTokenizer path: "
+ tokenizerPath
+ "\nModel loaded time: "
+ runDuration
+ loadDuration
+ " ms";
Message modelLoadedMessage = new Message(modelInfo, false);
runOnUiThread(
@@ -175,16 +182,10 @@ private void onModelRunStarted() {
view -> {
mModule.stop();
});

mRunStartTime = System.currentTimeMillis();
}

private void onModelRunStopped() {
setTitle(memoryInfo());
long runDuration = System.currentTimeMillis() - mRunStartTime;
if (mResultMessage != null) {
mResultMessage.setTokensPerSecond(1.0f * mNumTokens / (runDuration / 1000.0f));
}
mSendButton.setText("Generate");
mSendButton.setOnClickListener(
view -> {
@@ -219,8 +220,6 @@ public void run() {
};
new Thread(runnable).start();
});
mNumTokens = 0;
mRunStartTime = 0;
mMessageAdapter.notifyDataSetChanged();
}
