diff --git a/bin/wrapper.sh b/bin/wrapper.sh index ffb6e673478..5131878d71d 100755 --- a/bin/wrapper.sh +++ b/bin/wrapper.sh @@ -18,7 +18,7 @@ declare -A envs # list of expected environment variables with paths to products # Define build products here real[moc]=src/moc hint[moc]="make -C $rel_root/src moc" -envs[moc]="MOC_RELEASE_RTS MOC_DEBUG_RTS" +envs[moc]="MOC_NON_INCREMENTAL_RELEASE_RTS MOC_NON_INCREMENTAL_DEBUG_RTS MOC_INCREMENTAL_RELEASE_RTS MOC_INCREMENTAL_DEBUG_RTS MOC_EOP_RELEASE_RTS MOC_EOP_DEBUG_RTS" real[mo-ld]=src/mo-ld hint[mo-ld]="make -C $rel_root/src mo-ld" real[mo-doc]=src/mo-doc @@ -32,8 +32,12 @@ hint[candid-tests]="make -C $rel_root/src candid-tests" rts_hint="make -C $rel_root/rts" -real[MOC_RELEASE_RTS]=rts/mo-rts.wasm -real[MOC_DEBUG_RTS]=rts/mo-rts-debug.wasm +real[MOC_NON_INCREMENTAL_RELEASE_RTS]=rts/mo-rts-non-incremental.wasm +real[MOC_NON_INCREMENTAL_DEBUG_RTS]=rts/mo-rts-non-incremental-debug.wasm +real[MOC_INCREMENTAL_RELEASE_RTS]=rts/mo-rts-incremental.wasm +real[MOC_INCREMENTAL_DEBUG_RTS]=rts/mo-rts-incremental-debug.wasm +real[MOC_EOP_RELEASE_RTS]=rts/mo-rts-eop.wasm +real[MOC_EOP_DEBUG_RTS]=rts/mo-rts-eop-debug.wasm for var in ${envs[moc]}; do hint[$var]=$rts_hint diff --git a/default.nix b/default.nix index b59f1cdfea9..a582d9adcd1 100644 --- a/default.nix +++ b/default.nix @@ -20,6 +20,10 @@ let ic-ref-run = cp ${ic-hs-pkgs.ic-hs}/bin/ic-ref-run $out/bin ''; in +let + nixos-unstable = import nixpkgs.sources.nixpkgs-unstable {}; +in + let haskellPackages = nixpkgs.haskellPackages.override { overrides = import nix/haskell-packages.nix nixpkgs subpath; }; in @@ -35,6 +39,7 @@ let wasmtime rust-bindgen python3 + nixos-unstable.emscripten ] ++ pkgs.lib.optional pkgs.stdenv.isDarwin [ libiconv ]; @@ -193,7 +198,7 @@ rec { name = "motoko-rts-deps"; src = subpath ./rts; sourceRoot = "rts/motoko-rts-tests"; - sha256 = "sha256-jN5nx5UNBHlYKnC0kk90h6mWPUNrqPS7Wln2TixbGgA="; + sha256 = "sha256-prLZVOWV3BFb8/nKHyqZw8neJyBu1gs5d0D56DsDV2o="; 
copyLockfile = true; }; @@ -241,6 +246,7 @@ rec { "directory" = "$(stripHash ${allDeps})" __END__ + ${llvmEnv} export TOMMATHSRC=${nixpkgs.sources.libtommath} export MUSLSRC=${nixpkgs.sources.musl-wasi}/libc-top-half/musl @@ -255,8 +261,12 @@ rec { installPhase = '' mkdir -p $out/rts - cp mo-rts.wasm $out/rts - cp mo-rts-debug.wasm $out/rts + cp mo-rts-non-incremental.wasm $out/rts + cp mo-rts-non-incremental-debug.wasm $out/rts + cp mo-rts-incremental.wasm $out/rts + cp mo-rts-incremental-debug.wasm $out/rts + cp mo-rts-eop.wasm $out/rts + cp mo-rts-eop-debug.wasm $out/rts ''; # This needs to be self-contained. Remove mention of nix path in debug @@ -266,7 +276,17 @@ rec { -t ${nixpkgs.rustc-nightly} \ -t ${rtsDeps} \ -t ${rustStdDeps} \ - $out/rts/mo-rts.wasm $out/rts/mo-rts-debug.wasm + $out/rts/mo-rts-non-incremental.wasm $out/rts/mo-rts-non-incremental-debug.wasm + remove-references-to \ + -t ${nixpkgs.rustc-nightly} \ + -t ${rtsDeps} \ + -t ${rustStdDeps} \ + $out/rts/mo-rts-incremental.wasm $out/rts/mo-rts-incremental-debug.wasm + remove-references-to \ + -t ${nixpkgs.rustc-nightly} \ + -t ${rtsDeps} \ + -t ${rustStdDeps} \ + $out/rts/mo-rts-eop.wasm $out/rts/mo-rts-eop-debug.wasm ''; allowedRequisites = []; @@ -357,6 +377,31 @@ rec { EXTRA_MOC_ARGS = "--sanity-checks"; }; + snty_compacting_gc_subdir = dir: deps: + (test_subdir dir deps).overrideAttrs { + EXTRA_MOC_ARGS = "--sanity-checks --compacting-gc"; + }; + + snty_generational_gc_subdir = dir: deps: + (test_subdir dir deps).overrideAttrs { + EXTRA_MOC_ARGS = "--sanity-checks --generational-gc"; + }; + + snty_incremental_gc_subdir = dir: deps: + (test_subdir dir deps).overrideAttrs { + EXTRA_MOC_ARGS = "--sanity-checks --incremental-gc"; + }; + + enhanced_orthogonal_persistence_subdir = dir: deps: + (test_subdir dir deps).overrideAttrs { + EXTRA_MOC_ARGS = "--enhanced-orthogonal-persistence"; + }; + + snty_enhanced_orthogonal_persistence_subdir = dir: deps: + (test_subdir dir deps).overrideAttrs { + 
EXTRA_MOC_ARGS = "--sanity-checks --enhanced-orthogonal-persistence"; + }; + perf_subdir = dir: deps: (test_subdir dir deps).overrideAttrs (args: { checkPhase = '' @@ -415,7 +460,7 @@ rec { ''; }; - # wasm-profiler is not compatible with passive data segments + # wasm-profiler is not compatible with passive data segments and memory64 # profiling-graphs = testDerivation { # src = test_src "perf"; # buildInputs = @@ -468,20 +513,32 @@ rec { in fix_names ({ run = test_subdir "run" [ moc ] ; run-dbg = snty_subdir "run" [ moc ] ; + run-eop-release = enhanced_orthogonal_persistence_subdir "run" [ moc ]; + run-eop-debug = snty_enhanced_orthogonal_persistence_subdir "run" [ moc ]; # ic-ref-run = test_subdir "run-drun" [ moc ic-ref-run ]; drun = test_subdir "run-drun" [ moc nixpkgs.drun ]; drun-dbg = snty_subdir "run-drun" [ moc nixpkgs.drun ]; + drun-compacting-gc = snty_compacting_gc_subdir "run-drun" [ moc nixpkgs.drun ] ; + drun-generational-gc = snty_generational_gc_subdir "run-drun" [ moc nixpkgs.drun ] ; + drun-incremental-gc = snty_incremental_gc_subdir "run-drun" [ moc nixpkgs.drun ] ; + drun-eop-release = enhanced_orthogonal_persistence_subdir "run-drun" [ moc nixpkgs.drun ] ; + drun-eop-debug = snty_enhanced_orthogonal_persistence_subdir "run-drun" [ moc nixpkgs.drun ] ; fail = test_subdir "fail" [ moc ]; + fail-eop = enhanced_orthogonal_persistence_subdir "fail" [ moc ]; repl = test_subdir "repl" [ moc ]; + repl-eop = enhanced_orthogonal_persistence_subdir "repl" [ moc ]; ld = test_subdir "ld" ([ mo-ld ] ++ ldTestDeps); + ld-eop = enhanced_orthogonal_persistence_subdir "ld" ([ mo-ld ] ++ ldTestDeps); idl = test_subdir "idl" [ didc ]; mo-idl = test_subdir "mo-idl" [ moc didc ]; + mo-idl-eop = enhanced_orthogonal_persistence_subdir "mo-idl" [ moc didc ]; trap = test_subdir "trap" [ moc ]; + trap-eop = enhanced_orthogonal_persistence_subdir "trap" [ moc ]; run-deser = test_subdir "run-deser" [ deser ]; perf = perf_subdir "perf" [ moc nixpkgs.drun ]; bench = 
perf_subdir "bench" [ moc nixpkgs.drun ic-wasm ]; viper = test_subdir "viper" [ moc nixpkgs.which nixpkgs.openjdk nixpkgs.z3_4_12 ]; - # TODO: profiling-graph is excluded because the underlying partity_wasm is deprecated and does not support passive data segments. + # TODO: profiling-graph is excluded because the underlying partity_wasm is deprecated and does not support passive data segments and memory64. inherit qc lsp unit candid coverage; }) // { recurseForDerivations = true; }; @@ -526,7 +583,7 @@ rec { doInstallCheck = true; test = ./test + "/test-${n}.js"; installCheckPhase = '' - NODE_PATH=$out/bin node $test + NODE_PATH=$out/bin node --experimental-wasm-memory64 $test ''; }; in @@ -749,8 +806,9 @@ EOF check-grammar check-error-codes ] ++ - builtins.attrValues tests ++ - builtins.attrValues js; + builtins.attrValues tests + ++ builtins.attrValues js + ; }; viperServer = nixpkgs.fetchurl { diff --git a/design/Custom-Sections.md b/design/Custom-Sections.md index 35f70d7cb68..fe0d8cba31e 100644 --- a/design/Custom-Sections.md +++ b/design/Custom-Sections.md @@ -34,3 +34,7 @@ let hash : string -> int32 = fun s -> (Lib.String.explode s) ) ``` + +Motoko generates an additional `"enhanced-orthogonal-persistence"` private custom section to +mark Motoko Wasm binaries that rely on IC's support to retain the main Wasm memory on an upgrade, +cf. [Orthogonal Persistence](OrthogonalPersistence.md). diff --git a/design/DFX-Interface.md b/design/DFX-Interface.md index 5370bcba09c..8e6c565890a 100644 --- a/design/DFX-Interface.md +++ b/design/DFX-Interface.md @@ -118,6 +118,10 @@ used only in very specific cases. The above metadata is stored in the Wasm module, and is only accessible by the controllers of the canister, unless the metadata name is specified in the `--public-metadata` flag. 
+Moreover, the compiler generates a special marker custom section `"enhanced-orthogonal-persistence"` if the new orthogonal +persistence support is enabled, see [Orthogonal Persistence](OrthogonalPersistence.md). This section is always private and +always emitted independent of the compiler flags `--public-metadata` or `--omit-metadata`. + Checking stable type compatibility ---------------------------------- @@ -130,6 +134,10 @@ a type safe way without unintentional data loss. If the check succeeds, nothing will be printed. If the check fails, the error message will be printed in stderr and the command returns with exit code 1. +The check can also emit warning messages, e.g. if stable variables are dropped. + +With [enhanced orthogonal persistence](OrthogonalPersistence.md), the stable compatibility is also integrated in the runtime +system, to atomically guarantee memory compatibility during an upgrade. Invoking the IDE ---------------- diff --git a/design/GraphCopyStabilization.md b/design/GraphCopyStabilization.md new file mode 100644 index 00000000000..aa125afc896 --- /dev/null +++ b/design/GraphCopyStabilization.md @@ -0,0 +1,132 @@ +# Graph-Copy-Based Stabilization + +This is part of the enhanced orthogonal persistence support, see [Orthogonal Persistence](OrthogonalPersistence.md). + +## Purpose +This allows future potentially radical changes of the persistent memory layout, such as introducing a new GC, rearranging persistent metadata, or specializing arrays for small element types etc. +This also relies on precise value tagging to allow more advanced changes that require value metadata, e.g. specializing arrays for small value element types or even downgrading to 32-bit heap layouts (provided that the amount of live data fits into a 32-bit memory). + +## Design +Graph copy of sub-graph of stable objects from main memory to stable memory and vice versa on upgrades. + +## Properties +* Preserve sharing for all objects like in the heap.
+* Allow the serialization format to be independent of the main memory layout. +* Limit the additional main memory needed during serialization and deserialization. +* Avoid deep call stack recursion (stack overflow). +* Allow arbitrarily large stabilization/destabilization due to incremental mechanism (see below). + +## Memory Compatibility Check +Apply a memory compatibility check analogous to the enhanced orthogonal persistence, since the upgrade compatibility of the graph copy is not identical to the Candid subtype relation. + +## Incremental Upgrade +Supporting arbitrarily large upgrades beyond the instruction limit: +* Splitting the stabilization/destabilization in multiple asynchronous messages. +* Limiting the stabilization work units to fit the update or upgrade messages. +* Blocking other messages during the explicit incremental stabilization. +* Restricting the upgrade functionality to the canister owner and controllers. +* Stopping the GC during the explicit incremental upgrade process. + +**Note**: Graph copying needs to be explicitly initiated as the usual upgrade engages enhanced orthogonal persistence, simply retaining main memory with compatibility check. + +### Usage +When upgrading to a Motoko version that is not compatible with the current enhanced orthogonal persistence: + +1. Initiate the explicit stabilization before the upgrade: + +``` +dfx canister call CANISTER_ID __motoko_stabilize_before_upgrade "()" +``` + +* An assertion first checks that the caller is the canister owner or a canister controller. +* All other messages to the canister will be blocked until the upgrade has been successfully completed. +* The GC is stopped. +* If defined, the actor's pre-upgrade function is called before the explicit stabilization. +* The stabilization runs in possibly multiple asynchronous messages, each with a limited number of instructions. + +2. 
Run the actual upgrade: + +``` +dfx deploy CANISTER_ID +``` + +* Completes the explicit stabilization if not yet done before this call. +* Performs the actual upgrade of the canister on the IC. +* Detects that graph-copying is in use. +* Clears the heap if enhanced orthogonal persistence is active. +* Starts the destabilization with a limited number of steps to fit into the upgrade message. +* If destabilization cannot be completed, the canister does not start the GC and does not accept messages except step 3. + +3. Complete the explicit destabilization after the upgrade: + +``` +dfx canister call CANISTER_ID __motoko_destabilize_after_upgrade "()" +``` + +* An assertion checks that the caller is the canister owner or a canister controller. +* All other messages remain blocked until the successful completion of the destabilization. +* The destabilization runs in possibly multiple asynchronous messages, each with a limited number of instructions. +* If defined, the actor's post-upgrade function is called at the end of the explicit destabilization. +* The GC is restarted. + +### Remarks +* When receiving the `dfx` error "The request timed out." during explicit stabilization, upgrade, or destabilization, one can simply repeat the call until it completes. +* Step 3 (explicit destabilization) may not be needed if the corresponding operation fits into the upgrade message. +* Stabilization and destabilization steps are limited to the increment limits: + + Operation | Message Type | IC Instruction Limit | **Increment Limit** + ----------|--------------|----------------------|-------------------- + **Explicit (de)stabilization step** | Update | 20e9 | **16e9** + **Actual upgrade** | Upgrade | 200e9 | **160e9** + +* The graph copy steps also limit the amount of processed stable data (read or write), in order not to exceed the +IC's stable memory access limits.
+ + Operation | Message Type | IC Stable Access Limit | **Increment Limit** + ----------|--------------|----------------------|-------------------- + **Explicit (de)stabilization step** | Update | 2 GB | **1 GB** + **Actual upgrade** | Upgrade | 8 GB | **6 GB** + +## Graph-Copy Algorithm +Applying Cheney’s algorithm [1, 2] for both serialization and deserialization: + +### Serialization +* Cheney’s algorithm using main memory as from-space and stable memory as to-space: +* Focusing on stable variables as root (sub-graph of stable objects). +* The target pointers and Cheney’s forwarding pointers denote the (skewed) offsets in stable memory. +* Using streaming reads for the `scan`-pointer and streaming writes for the `free`-pointer in stable memory. + +### Deserialization +* Cheney’s algorithm using stable memory as from-space and main memory as to-space: +* Starting with the stable root created during the serialization process. +* A scan stack is used in the main memory to remember the deserialized objects for later scanning. +* Objects are allocated in main memory using the default allocator. +* Using random read/write access on the stable memory. + +## Stable Format +For a long-term perspective, the object layout of the serialized data in the stable memory is fixed and independent of the main memory layout. +* Pointers are represented in 64-bit like main memory in enhanced orthogonal persistence. +* The Brooks forwarding pointer used by the incremental GC is omitted. +* The pointers encode skewed stable memory offsets to the corresponding target objects. +* References to the null objects are encoded by a defined null sentinel value. +* `BigInt` are explicitly serialized in a defined portable little endian representation, without that the serialization or deserialization allocates temporary objects. +The format is also versioned to allow future refinements of the graph copy algorithm. + +## Specific Aspects +* Field hashes in objects are serialized in a blob. 
On deserialization, the hash blob is allocated in the dynamic heap. Same-typed objects that have been created by the same program version share the same hash blob. +* Stable records can dynamically contain non-stable fields due to structural sub-typing. A dummy value can be serialized for such fields as a new program version can no longer access this field through the stable types. +* For backwards compatibility, old Candid destabilization is still supported when upgrading from a program that used older compiler version. +* Incremental GC: Serialization needs to consider Brooks forwarding pointers (not to be confused with the Cheney's forwarding information), while deserialization can deal with partitioned heap that can have internal fragmentation (free space at partition ends). +* The partitioned heap prevents linear scanning of the heap, especially in the presence of large objects that can be placed at a higher partition than subsequently allocated normal-sized objects. For this reason, a scan stack is allocated in the main memory, remembering the deserialized objects that still need to be scanned. With this, the deserialization does not need to make any assumptions of the heap structure (e.g. monotonically increasing allocations, free space markers, empty heap on deserialization start etc.). +* If actor fields are promoted to the `Any` type in a new program version, their content is released in that variable to allow memory reclamation. +* Both stabilization and destabilization read and write data linearly, which is beneficial for guarding a work set limit (number of accessed pages) per IC message. Destabilization is also linear because it deserializes objects in the same order back as they have been serialized. + +## Open Aspects +* Unused fields in stable records that are no longer declared in a new program versions should be removed. This could be done during garbage collection, when objects are moved/evacuated. 
This scenario equally applies to enhanced orthogonal persistence. +* The scan stack used during destabilization involves dynamic allocations. + +## References + +[1] C. J. Cheney. A Non-Recursive List Compacting Algorithm. Communications of the ACM, 13(11):677-8, November 1970. + +[2] R. Jones and R. Lins. Garbage Collection: Algorithms for Automatic Dynamic Memory Management. Wiley 2003. Algorithm 6.1: Cheney's algorithm, page 123. diff --git a/design/Implementation.md b/design/Implementation.md index be892d3cadc..9d73ed44efa 100644 --- a/design/Implementation.md +++ b/design/Implementation.md @@ -9,25 +9,29 @@ ## Heap -* Uniform representation with 32 bit word size. +* Uniform representation with a defined word size. +For [enhanced orthogonal persistence](OrthogonalPersistence.md), 64-bit words are used, while for classical persistence, the word size is 32-bit. -* Use pointer tagging in LSB;. - - 0 for pointers, 1 for scalars. - - Scalars are real value shifted left by 1, lowest bit set. - -* Q: Allocation and GC strategies? +* Use pointer tagging in the LSB: + - 1 for pointers, 0 for scalars. + - Scalars are real value shifted left by 1, lowest bit clear. + For [enhanced orthogonal persistence](OrthogonalPersistence.md), the types of scalars are additionally tagged. + +* Garbage collected. ## Primitive types * Nat and Int compile to heap-allocated big nums; unboxed for small numbers `<= 31` bit. -* Nat8/16 compile to unboxed scalars; Nat32/64 are boxed. +* Nat8/16 compile to unboxed scalars; + On a 32-bit heap, Nat32/64 are boxed. + On a 64-bit heap, only Nat64 is boxed, while Nat32 remains unboxed. - May unbox locally. * Characters are scalars (unicode code points). -* Text is heap-allocated. +* Text is heap-allocated. Using ropes for concatenations. ## Tuples @@ -103,6 +107,11 @@ TODO TODO +## Persistence + +Different * [persistence modes](OrthogonalPersistence.md): +* [Enhanced orthogonal persistence](OrthogonalPersistence.md). 
+* [Classical persistence](OldStableMemory.md). # Hypervisor Extensions needed diff --git a/design/Memory.md b/design/Memory.md index 7f628ad9a11..ab245410cf4 100644 --- a/design/Memory.md +++ b/design/Memory.md @@ -29,8 +29,11 @@ In the future (with the GC proposal), Wasm will have a 4th form of mutable state The Heap is *not* an explicit entity that can be im/exported, only individual references to structures on the heap can be passed. -Note: It is highly likely that most languages implemented on Wasm will eventually use Wasm GC. -Various implementers are currently waiting for it to become available before they start porting their language to Wasm. +Note: It is highly likely that several managed languages implemented on Wasm will eventually use Wasm GC. +However, in our case, it would require snapshotting the Wasm managed heap which is currently not possible for `wasmtime`. +Moreover, the GC implemented on the managed heap does probably not fit the IC with hard instruction limits. +A fully incremental GC would be needed, which is currently not implemented in any Wasm engine (often only using reference counting or a GC that has worst-case unbounded pauses). +Conceptually, enhanced orthogonal persistence could be implemented on Wasm GC. ### Internet Computer (IC) @@ -48,19 +51,16 @@ All references are *sharable*, i.e., can be passed between actors as message arg Other than actors, all reference types must be pure (immutable and without identity) to prevent shared state and allow transparent copying by the implementation. Element buffers can encode arbitrary object trees. -Once Wasm GC is available, some of these types (esp. buffers) could be replaced by proper Wasm types. - - -## Language Implementation +## Language Implementation Rationales ### Representing Data Structures There are 3 possible ways of representing structured data in Wasm/IC. 
-#### Using Wasm Memory +#### Using Wasm Memory <- Chosen Design -All data structures are laid out and managed in Memory by the compiler and the language runtime. -References are stored via indirections through a Table. +All data structures are laid out and managed in Wasm memory by the compiler and the runtime system. +Function references are stored via indirections through a Wasm table. Pros: 1. local data access maximally efficient @@ -69,7 +69,7 @@ References are stored via indirections through a Table. Cons: 1. message arguments require de/serialisation into IC buffers on both ends (in addition to the de/serialisation steps already performed by IC) 2. each actor must ship its own instance of a GC (for both memory and table) and de/serialisation code - 3. all references require an indirection + 3. all function references require an indirection 4. more implementation effort #### Using IC API @@ -102,88 +102,8 @@ All data structures are represented as Wasm GCed objects. 1. Wasm GC is 1-2 years out 2. unclear how to implement transparent persistence (see below) - ## Persistence -### Persistence models - -There are at least 3 general models for providing persistence. - -#### *Explicit* persistence - -IC API provides explicit system calls to manage persistent data. -Wasm state is volatile; each message received runs in a fresh instance of the actor's module. - - Pros: - 1. easy and efficient to implement - 2. apps have maximal control over persistent data and its layout - - Cons: - 1. bifurcation of state space - 2. programs need to load/store and de/serialise persistent data to/from local state - -#### *Transparent* persistence - -All Wasm state is implicitly made persistent. -Conceptually, each message received runs in the same instance of the actor's module. - - Pros: - 1. "perfect" model of infinitely running program - 2. programmers need to "think" less - - Cons: - 1. hard to implement efficiently without knowing neither language nor application - 2. 
can easily lead to space leaks or inefficiencies if programmers aren't careful - -#### *Hybrid* persistence -Wasm state entities can be marked as persistent selectively. -Conceptually, each message received runs in the same instance of the actor's module, -but Wasm is extended with some notion of volatile state and reinitialisation. - - Pros: - 1. compromise between other two models - - Cons: - 1. compromise between other two models - 2. creates dangling references between bifurcated state parts - 3. incoherent with Wasm semantics (segments, start function) - -### Implementing Transparent persistence - -#### *High-level* implementation of persistence - -Hypervisor walks data graph (wherever it lives), turns it into merkle tree. - - Pros: - 1. agnostic to implementation details of the engine - 2. agnostic to GC (or subsumes GC) - - Cons: - 1. requires knowledge of and access to data graph - 2. deep mutations result in deep changes in merkle tree (mutation cost is logarithmic in depth) - 3. unclear how to detect changes efficiently - -#### *Low-level* implementation of persistence - -Hypervisor provides memory to Wasm engine, detects dirty pages; could be memory-mapped files. - - Pros: - 1. agnostic to language and data graph - 2. fast when mutation patterns have good locality - 3. can potentially offload much of the implementation to existing hardware/OS/library mechanisms - - Cons: - 1. bad interaction with language-internal GC (mutates large portions of the memory at once) - 2. does not extend to tables (contain position-dependent physical pointers) - 3. no obvious migration path to Wasm GC - 4. dependent on VM specifics (and internals?) - -#### *Selectable* implementation of persistence - -Provide both previous options, possibly in a mutually exclusive fashion. - - Pros: - 1. choice for implementers - - Cons: - 1. 
maximal complexity for platform +Different * [persistence modes](OrthogonalPersistence.md): +* [Enhanced orthogonal persistence](OrthogonalPersistence.md). +* [Classical persistence](OldStableMemory.md). diff --git a/design/OldStableMemory.md b/design/OldStableMemory.md index 322543e8777..7267b4e2b45 100644 --- a/design/OldStableMemory.md +++ b/design/OldStableMemory.md @@ -124,10 +124,10 @@ module StableMemory { (I think the compiler will still optimize these nested calls to known function calls, but it would be worth checking). -# Maintaining existing Stable Variables. +# Maintaining existing Stable Variables (Legacy Persistence). -Stable memory is currently hidden behind the abstraction of stable -variables, which we will still need to maintain. The current +In classical persistence, stable memory is hidden behind the abstraction of stable +variables, which we will still need to maintain. This old implementation of stable variables stores all variables as a Candidish record of _stable_ fields, starting at stable memory address 0 with initial word encoding size (in bytes?) followed by contents. @@ -170,6 +170,9 @@ and other metadata (so that initial reads after growing beyond page `size` alwa This scheme avoids relocating most of StableMem and is constant time when there are no stable variables. +[Enhanced orthogonal persistence](OrthogonalPersistence.md) introduces a new peristence implementation. +The old mechanism is only supported for backwards compatibility. + # Details: Stable memory layout (during execution): diff --git a/design/OrthogonalPersistence.md b/design/OrthogonalPersistence.md index 3551af8469c..e1b971fffdb 100644 --- a/design/OrthogonalPersistence.md +++ b/design/OrthogonalPersistence.md @@ -1,29 +1,33 @@ -# Orthogonal Persistence (Stable Heap) -This realizes the vision of keeping the canister main memory persistent even across upgrades and thus allows scalable upgrades. 
-Canister upgrades do no longer involve serialization and deserialization to and from secondary stable memory. +# Enhanced Orthogonal Persistence -## Purpose -* **Instantenous upgrades**: New program versions simply resume on the existing main memory and have access to the memory-compatible data. -* **Scalable upgrades**: The upgrade mechanism scales with larger heaps and in contrast to serialization, does not hit IC instruction limits. +This implements the vision of **enhanced orthogonal persistence** in Motoko that combines: +* **Stable heap**: Persisting the program main memory across canister upgrades. +* **64-bit heap**: Extending the main memory to 64-bit for large-scaled persistence. -## Broader Vision -In the longer term, this approach aims to enable **true orthogonal persistence** that is simple, flexible, efficient, and scalable. -While this version implements the runtime support for 32-bit memory, this could be leveraged to 64-bit persistent main memory in future. -As a result, the use of secondary storage (explicit stable memory, dedicated stable data structures, DB-like storage abstractions) will no longer be necessary: -Motoko developers could directly work on their normal object-oriented program structures that are automatically persisted and retained across program version changes. -With 64-bit main memory, large-scaled orthogonal persistence would be enabled, supported by the incremental GC that is designed to also scale in 64-bit. +As a result, the use of secondary storage (explicit stable memory, dedicated stable data structures, DB-like storage abstractions) will no longer be necessary: Motoko developers can directly work on their normal object-oriented program structures that are automatically persisted and retained across program version changes. 
+ +## Advantages +Compared to the existing orthogonal persistence in Motoko, this design offers: +* **Performance**: New program versions directly resume from the existing main memory and have access to the memory-compatible data. +* **Scalability**: The upgrade mechanism scales with larger heaps and in contrast to serialization, does not hit IC instruction limits. + +Compared to the explicit use of stable memory, this design improves: +* **Simplicity**: Developers do not need to deal with explicit stable memory. +* **Performance**: No copying to and from the separate stable memory is necessary. ## Design -The stable heap is based on the following main properties: +The enhanced orthogonal persistence is based on the following main properties: * Extension of the IC to retain main memory on upgrades. +* Supporting 64-bit main memory on the IC. * A long-term memory layout that is invariant to new compiled program versions. * A fast memory compatibility check performed on each canister upgrade. * Incremental garbage collection using a partitioned heap. -### IC Extension -As a prerequisite for the stable heap support, the IC runtime support has to be extended in order not to erase the main memory on upgrades. -This is realized in a specific IC PR (https://github.com/luc-blaeser/ic/tree/luc/stable-heap-on-release) that retains the main memory even on upgrades, similar to normal canister message execution. +### Integration with Classical Persistence +Enhanced orthogonal persistence is offered for **beta testing** via the compiler flag `--enhanced-orthogonal-persistence`. +Classical persistence with 32-bit main memory and Candid stabilization currently remains the default mode. +See `design/PersistenceModes.md` for more information. 
### Memory Layout In a co-design between the compiler and the runtime system, the main memory is arranged in the following structure, invariant of the compiled program version: @@ -34,6 +38,7 @@ In a co-design between the compiler and the runtime system, the main memory is a ### Persistent Metadata The persistent metadata describes all anchor information for the program to resume after an upgrade. + More specifically, it comprises: * A stable heap version that allows evolving the persistent memory layout in the future. * The stable subset of the main actor, containing all stable variables declared in the main actor. @@ -42,9 +47,9 @@ More specifically, it comprises: * A reserve for future metadata extensions. ### Compatibility Check -Upgrades are only permitted if the new program version is compatible to the old version, such that the runtime system guarantees a compatible memory structure. +Upgrades are only permitted if the new program version is compatible with the old version, such that the runtime system guarantees a compatible memory structure. -Compatible changes for immutable types are equivalent to the allowed Motoko subtype relation, e.g. +Compatible changes for immutable types are largely analogous to the allowed Motoko subtype relation, e.g. * Adding or removing actor fields. * Removing object fields. * Adding variant fields. @@ -65,10 +70,9 @@ This compatibility check serves as an additional safety measure on top of the DF ### Garbage Collection The implementation focuses on the incremental GC and abandons the other GCs because the GCs use different memory layouts. For example, the incremental GC uses a partitioned heap with objects carrying a forwarding pointer. -The incremental GC is chosen because it is designed to scale on large heaps and the stable heap design also aims to increase scalability. Moreover, it is suited to scale on 64-bit memory in future. 
+The incremental GC is chosen because it is designed to scale on large heaps and the stable heap design also aims to increase scalability. -The garbage collection state needs to be persisted and retained across upgrades. -This is because the GC may not yet be completed at the time of an upgrade, such that object forwarding is still in use. The partition table is stored as part of the GC state. +The garbage collection state needs to be persisted and retained across upgrades. This is because the GC may not yet be completed at the time of an upgrade, such that object forwarding is still in use. The heap partition structure is described by a linked list of partition tables that is reachable from the GC state. The garbage collector uses two kinds of roots: * Persistent roots: These refer to root objects that need to survive canister upgrades. @@ -82,50 +86,60 @@ The transient roots are referenced by the Wasm data segments and comprise: * All canister variables of the current version, including flexible variables. ### Main Actor -On an upgrade, the main actor is recreated and existing stable variables are recovered from the persistent root. -The remaining actor variables, the flexible fields as well as new stable variables, are (re)initialized. -As a result, the GC can collect unreachable flexible objects of previous canister versions. -Unused stable variables of former versions can also be reclaimed by the GC. +On an upgrade, the main actor is recreated and existing stable variables are recovered from the persistent root. The remaining actor variables, the flexible fields as well as new stable variables, are (re)initialized. -### No Static Heap -The static heap is abandoned and former static objects need to be allocated in the dynamic heap. -This is because these objects may also need to survive upgrades and must not be not overwritten by new data segments. +As a result, the GC can collect unreachable flexible objects of previous canister versions. 
Unused stable variables of former versions can also be reclaimed by the GC. -The incremental GC also operates on these objects, meaning that forwarding pointer resolution is also necessary for these objects. +### No Static Heap +The static heap is abandoned and former static objects need to be allocated in the dynamic heap. This is because these objects may also need to survive upgrades and the persistent main memory cannot accommodate a growing static heap of a new program version in front of the existing dynamic heap. The incremental GC also operates on these objects, meaning that forwarding pointer resolution is also necessary for these objects. For memory and runtime efficiency, object pooling is implemented for compile-time-known constant objects (with side-effect-free initialization), i.e. those objects are already created on program initialization/upgrade in the dynamic heap and thereafter the reference to the corresponding prefabricated object is looked up whenever the constant value is needed at runtime. -The runtime systems avoids any global Wasm variables for state that needs to be preserved on upgrades. -Instead, such global runtime state is stored in the persistent metadata. +The runtime system avoids any global Wasm variables for state that needs to be preserved on upgrades. Instead, such global runtime state is stored in the persistent metadata. ### Wasm Data Segments -Only passive Wasm data segments are used by the compiler and runtime system. In contrast to ordinary active data segments, passive segments can be explicitly loaded to a dynamic address. +Only passive Wasm data segments are used by the Motoko compiler and runtime system. In contrast to ordinary active data segments, passive segments can be explicitly loaded to a dynamic address. This simplifies two aspects: -* The generated Motoko code can contain arbitrarily large data segments which can be loaded to dynamic heap when needed. 
-* The IC can simply retain the main memory on an upgrade without needing to patch the active data segments of the new program version to the persistent memory. +* The generated Motoko code can contain arbitrarily large data segments (to the maximum that is supported by the IC). The segments can be loaded to the dynamic heap when needed. +* The IC can simply retain the main memory on an upgrade without needing to patch any active data segments of the new program version to the persistent main memory. -However, more specific handling is required for the Rust-implemented runtime system: -The Rust-generated active data segments of the runtime system is changed to passive and loaded to the expected static address at the program start (canister initialization and upgrade). -The location and size of the RTS data segments is therefore limited to a defined reserve, see above. -This is acceptable because the RTS only uses small size for data segments (e.g. 54KB) that is independent of the compiled Motoko program. +However, more specific handling is required for the Rust-implemented runtime system (RTS): The Rust-generated active data segment of the runtime system is changed to the passive mode and loaded to the expected static address on the program start (canister initialization and upgrade). The location and size of the RTS data segments is therefore limited to a defined reserve of 512 KB, see above. This is acceptable because the RTS only requires a controlled small amount of memory for its data segments, independent of the compiled Motoko program. ### Null Sentinel As an optimization, the top-level `null` pointer is represented as a constant sentinel value pointing to the last unallocated Wasm page. This allows fast null tests without involving forwarding pointer resolution of potential non-null comparand pointers. 
+### Memory Capacity +The canister has no upfront knowledge of the maximum allocatable Wasm main memory in 64-bit address space, as there is no IC API call to query the main memory limit. This limit may also be increased in future IC releases. + +Therefore, a mechanism is implemented to deal with an unknown and dynamically increasable main memory capacity offered by the IC. This is needed in two cases: + +* GC reserve (strict): The runtime system ensures sufficient free space to allow garbage collection at all times, even if the heap is full. For this purpose, the runtime system already pre-allocates the reserve, to be sure that the reserve is available despite the unknown capacity. As an optimization, this pre-allocation is skipped when the memory demand including the GC reserve is below a guaranteed minimum Wasm memory limit of the IC, e.g. 4GB or 6GB. +* GC scheduling (heuristic): The GC schedules at high frequency when memory is becoming scarce. For this purpose, the GC maintains an assumption of the minimum memory limit and probes the supposed limit when the heap size approaches this limit. If the allocation succeeds, the assumed limit is increased. Otherwise, the critical high-frequency GC scheduling rate is activated. + +In both cases, the runtime system tries to reduce Wasm memory allocations as much as possible, i.e. not pre-allocating memory for small heap sizes, and not probing an allocation in certain memory ranges by assuming that the IC only offers main memory of a certain granularity, e.g. multiples of 2GB. To save instructions, the critical GC scheduling is only activated when reaching the actual memory limit. Moreover, the mechanism can handle an increased memory capacity at runtime, e.g. when the IC is upgraded to a new release with a higher memory limit. 
+ ### Migration Path -When migrating from the old serialization-based stabilization to the new stable heap, the old data is deserialized one last time from stable memory and then placed in the new stable heap layout. -Once operating on the stable heap, the system prevents downgrade attempts to the old serialization-based persistence. +When migrating from the old serialization-based stabilization to the new persistent heap, the old data is deserialized one last time from stable memory and then placed in the new persistent heap layout. Once operating on the persistent heap, the system should prevent downgrade attempts to the old serialization-based persistence. -### Old Stable Memory -The old stable memory remains equally accessible as secondary memory with the new support. +Assuming that the persistent memory layout needs to be changed in the future, the runtime system supports serialization and deserialization to and from stable memory in a defined data format using graph copy. -## Current Limitations -* Freeing old object fields: While new program versions can drop object fields, the runtime system should also delete the redundant fields of persistent objects of previous program versions. This could be realized during garbage collection when objects are copied. For this purpose, the runtime system may maintain a set of field hashes in use and consult this table during garbage collection. -* Bounded Rust call stack size: The Rust call stack size needs to be bounded and can no longer be configured by the user. -* The Wasm profiler (only used for the flamegraphs) is no longer applicable because the underlying `parity-wasm` crate lacks full support of passive data segments. A re-implementation of the profiler would be needed. +### Graph Copy +The graph copy is an alternative persistence mechanism that will be only used in the rare situation when the persistent memory layout will be changed in the future. 
Arbitrarily large data can be serialized and deserialized beyond the instruction and working set limit of upgrades: Large data serialization and deserialization is split in multiple messages, running before and/or after the IC upgrade to migrate large heaps. Of course, other messages will be blocked during this process and only the canister owner or the canister controllers are permitted to initiate this process. + +Graph copying needs to be explicitly initiated before an upgrade to a new Motoko version that is incompatible with the current enhanced orthogonal persistence layout. For large data, the graph copy needs to be manually completed after the actual upgrade. -## Related PRs +``` +dfx canister call CANISTER_ID __motoko_stabilize_before_upgrade "()" +dfx deploy CANISTER_ID +dfx canister call CANISTER_ID __motoko_destabilize_after_upgrade "()" +``` -* IC with stable main memory support: https://github.com/luc-blaeser/ic/tree/luc/stable-heap-on-release -* Wasm64 Support for Motoko: https://github.com/dfinity/motoko/pull/4136
Another, probably too restrictive solution could be to disallow field removal (subtyping) on object upgrades during the memory compatibility check. diff --git a/design/PersistenceModes.md b/design/PersistenceModes.md new file mode 100644 index 00000000000..520cc8b9fbf --- /dev/null +++ b/design/PersistenceModes.md @@ -0,0 +1,50 @@ +# Persistence Modes + +This Motoko build includes two substantially different persistence modes in one build: + +* [Classical Persistence](OldStableMemory.md) (default): + This is the traditional Motoko compiler design based on 32-bit memory and Candid-based stabilization for upgrades. + This mode is known to have severe scalability problems on upgrades, because the stabilization may exceed the upgrade instruction limit for larger amounts of stable data, besides other problems such as exponential duplication or stack overflows depending on the data structures. + The mode is temporarily retained to allow beta testing of the new enhanced orthogonal persistence until the new persistence becomes official. +* [Enhanced Orthogonal Persistence](OrthogonalPersistence.md) (new, for beta testing): + This implements scalable persistence with 64-bit main memory that is retained across upgrades without stabilization to stable memory. + The mode needs to be enabled by the compiler flag `--enhanced-orthogonal-persistence` and is intended to become the future default mode, deprecating classical persistence. + +The reason for having one build instead of two separate branches and release artefacts is to maintain a unified branch and to ensure that new features are implemented and tested for both persistence modes, passing the same CI. + +## Compiler Flags + +* (no flag): Use classical persistence +* `--enhanced-orthogonal-persistence`: Use enhanced orthogonal persistence. NOTE: This is currently in the **beta testing** phase.
+ +Certain compiler flags are only applicable to a specific persistence mode: + +Flag | Applicable Mode +------------------|---------------- +--rts-stack-pages | Classical persistence only +--stabilization-instruction-limit | Enhanced persistence only +--copying-gc | Classical persistence only +--compacting-gc | Classical persistence only +--generational-gc | Classical persistence only + +(All other flags are applicable to both modes.) + +Incremental graph copy stabilization with `__motoko_stabilize_before_upgrade` and `__motoko_destabilize_after_upgrade` is only available with enhanced orthogonal persistence and is only needed in the rare case of a memory layout upgrade. + +## Source Structure + +## Runtime System +The Motoko runtime system (RTS) is a combined source base supporting 3 modes, each with a debug and release build: +* 32-bit classical persistence, with classical non-incremental GCs +* 32-bit classical persistence, with the incremental GC +* 64-bit enhanced orthogonal persistence + +## Compiler +For pragmatic purposes, the compiler backend is split/duplicated into two parts: +* `compile-enhanced.ml`: Enhanced orthogonal persistence, 64-bit, passive data segments, incremental graph copy. +* `compile-classical.ml`: Classical persistence, 32-bit, Candid stabilization. + +The linker integrates both persistence modes and 32-bit and 64-bit in one package. + +## Tests +Most tests run on both modes. Specific tests apply to selected modes, as defined by the `ENHANCED-ORTHOGONAL-PERSISTENCE` or `CLASSICAL-PERSISTENCE` tags. diff --git a/design/Stable.md b/design/Stable.md index 2dfd34975f6..eba0623e275 100644 --- a/design/Stable.md +++ b/design/Stable.md @@ -22,10 +22,12 @@ Concretely, the syntax of `` is extended as follows: ``` Additional restrictions apply: -* Either a `stable` or `flexible` modifier _must_ appear on `let` and `var` declarations that are actor fields.
+* A `stable` or `flexible` modifier _can_ appear on `let` and `var` declarations that are actor fields. * A `stable` or `flexible` modifier _must not_ appear anywhere else. -Both restrictions may be relaxed in the future. +Currently, `flexible` is assumed as the implicit keyword on actor fields if no keyword is declared. +However, we should revise this design, as it may lead to accidental loss of data on upgrade if programmers forget to specify `stable`. +In other languages of orthogonal persistence, pointers are by default persistent, analogous to `stable` in Motoko. (Note: One possible future use case might be to mark private methods as stable, which would be a requisite that they can be handed out as capabilities, because such methods must also remain backwards compatible.) @@ -122,7 +124,7 @@ Question: Should the stable signature become a superset of Candid signatures, i. Like the Candid IDL, the Motoko compiler can produce stable signatures for the actors it compiles. -We will also need a tool (the compiler, or a separate one?) that can compare stable signature and verify that an extension is valid according to the Motoko subtyping rules. +By using `moc --stable-compatible`, one can compare stable signatures and verify that an extension is valid according to the Motoko subtyping rules. To make that test reliable, the stable signature of an actor should be contained in the Wasm module of a deployed Motoko actor. That way, it is ensured that accurate signature information is always available for each installed actor. @@ -132,17 +134,14 @@ In either case, it is probably sufficient to use a textual representation. Like for the IDL, the System would need to provide a way to extract this information from an on-chain canister. +For even higher safety, [enhanced orthogonal persistence](OrthogonalPersistence.md) integrates the compatibility check in the runtime system, +such that it is atomically guarded and cannot be bypassed e.g.
by skipping a `dfx` stable compatibility warning. ## Upgrade Hooks The System API provides a number of hooks that a canister can implement. In particular, this includes the pre & post upgrade hooks. - -Motoko does not currently provide a way to define these hooks. -While the post upgrade hook can be exploited by using expression declarations (see above), there is no immediate way to define the pre upgrade hook. - -Note: This feature could potentially be deferred until later. - +Motoko allows defining custom pre-/post-upgrade hooks; see below. ### Syntax @@ -180,4 +179,6 @@ Note: The post-upgrade method differs from expression declarations in the body o ## Implementation -See `OrthogonalPersistence.md`. +Different [persistence modes](OrthogonalPersistence.md): +* [Enhanced orthogonal persistence](OrthogonalPersistence.md). +* [Classical orthogonal persistence](OldStableMemory.md). diff --git a/design/StableMemory.md b/design/StableMemory.md new file mode 100644 index 00000000000..759ba39390d --- /dev/null +++ b/design/StableMemory.md @@ -0,0 +1,30 @@ +# The IC's Stable Memory API + +The IC provides a very small set of functions for operating on stable memory: + +``` +ic0.stable_size : () -> (page_count : i32); // * +ic0.stable_grow : (new_pages : i32) -> (old_page_count : i32); // * +ic0.stable_write : (offset : i32, src : i32, size : i32) -> (); // * +ic0.stable_read : (dst : i32, offset : i32, size : i32) -> (); // * +``` + +(see https://sdk.dfinity.org/docs/interface-spec/index.html#system-api-stable-memory) + +These grow memory and do bulk transfers between Wasm and stable +memory. The `// *` means that they can be called in all contexts +(e.g. init, update, query etc). Direct reads and writes of word-sized +data to/from the stack are not supported but can be emulated at cost. +The initial size of the stable memory is zero. The contents of fresh pages (after grow) are initially zero.
+ +Note that, in this API, the client is responsible for growing (both +stable and wasm) memory before access by read or write (out-of-bounds +access will trap). + +# Stable Memory Accesses + +Direct stable memory accesses (`ExperimentalStableMemory` in Motoko's base library) have been deprecated. +Instead, [stable regions](StableRegions.md) are available to explicitly access stable memory. + +However, generally, programmers do not need to use stable memory due to the support of orthogonal persistence, +see [Enhanced Orthogonal Persistence](OrthogonalPersistence.md). diff --git a/design/StableRegions.md b/design/StableRegions.md index 604343edbcd..b3fe0cc709d 100644 --- a/design/StableRegions.md +++ b/design/StableRegions.md @@ -13,7 +13,7 @@ The **region manager** is the state and logic to support this generalization. The current stable memory module in `base` has been "experimental" for a long time, and requires a more composable API to graduate from this status. -Stable regions address the problem that today's `ExperimentalStableMemory` module only provides a single, monolithic memory that makes it unsuitable for directly building composable software parts. +Stable regions address the problem that the deprecated `ExperimentalStableMemory` module only provided a single, monolithic memory that made it unsuitable for directly building composable software parts. Stable regions permit a new API that supports composable use cases. @@ -331,3 +331,7 @@ Since we do not move the remaining blocks of region 0, the first block of memory This design ensures that an existing canister using very large amounts of experimental stable memory can be migrated with only constant-cost movement of the first block (128 pages) of memory. + +## Orthogonal Persistence + +Stable regions can be used together with orthogonal persistence, see [Classical Persistence](OldStableMemory.md) and [Enhanced Orthogonal Persistence](OrthogonalPersistence.md).
\ No newline at end of file diff --git a/design/WhitePaper.md b/design/WhitePaper.md index 8045e4d4b41..bac583de1e4 100644 --- a/design/WhitePaper.md +++ b/design/WhitePaper.md @@ -621,14 +621,8 @@ Note: the obvious "solution", namely storing closure environments inside the ori #### Upgrades and Memory -The most difficult problem to solve in the programming model of the IC by far is the question of safe and robust upgrades. Motoko currently uses the IC's _stable memory_ API to serialise the entire heap of an actor into stable memory before an upgrade, and restore it afterwards. The crucial point of this is that the serialised format is fixed and not dependent on the compiler version. Consequently, it is perfectly fine if the new version of the actor has been compiled with a different (typically newer) compiler version that potentially uses a different memory layout internally (e.g., a new garbage collector). - -The drawback is that this serialisation/deserialisation step is expensive. Worse, it may even run out of cycles. - -There are multiple ways in which the representation of stable variables could be improved to avoid this overhead (or rather, trade it off against a different overhead). However, most of them would be extremely costly with the IC's stable memory API. This API was merely a stop-gap measure while we wait for the IC to support the upcoming Wasm proposal for accessing multiple memories. Once this becomes available, it would unlock a number of new and better implementation options. - -Yet, representing all persistent data in terms of serialised Motoko values might never be enough for all use cases. Imagine, for example, emulating a file system or a high-performance data base as persistent storage. For these use cases, Motoko will provide a low-level interface that enables direct access to raw stable memory, leaving it up to respective libraries to build suitable high-level abstraction on top. 
- +The most difficult problem to solve in the programming model of the IC by far is the question of safe and robust upgrades. +For this purpose, Motoko implements powerful and safe persistence, see [Enhanced Orthogonal Persistence](OrthogonalPersistence.md). #### Upgrades and Methods @@ -669,6 +663,8 @@ The ability to link compiled modules together would also be a first step towards Unfortunately, though, this is a more difficult problem than linking modules compiled in the same language, since the data types, memory management, and calling conventions used by different languages are rarely compatible. Supporting cross-language calls requires a suitable ABI agreed upon by different Wasm compilers, which in turn requires some kind of standard. There is work on a proposal ([interface types](https://github.com/WebAssembly/interface-types/blob/main/proposals/interface-types/Explainer.md)) for Wasm that could be the basis of such a mechanism, but it's not ready yet. +Wasm component model offers a solution for secure intra-canister language interop on the IC. + #### On-chain Linking diff --git a/doc/md/canister-maintenance/compatibility.md b/doc/md/canister-maintenance/compatibility.md index b05fd92739e..957bdf64924 100644 --- a/doc/md/canister-maintenance/compatibility.md +++ b/doc/md/canister-maintenance/compatibility.md @@ -6,13 +6,13 @@ sidebar_position: 4 ## Overview -When upgrading a canister, it is important to verify that an upgrade can proceed without: +When upgrading a canister, it is important to verify that the upgrade can proceed without: +- Introducing an incompatible change in stable declarations. - Breaking clients due to a Candid interface change. -- Discarding the Motoko stable state due to a change in stable declarations. - -Motoko checks these properties statically before attempting the upgrade. +`dfx` checks these properties statically before attempting the upgrade. 
+Moreover, with [enhanced orthogonal persistence](orthogonal-persistence/enhanced.md), Motoko rejects incompatible changes of stable declarations. ## Upgrade example @@ -21,7 +21,8 @@ The following is a simple example of how to declare a stateful counter: ``` motoko no-repl file=../examples/count-v0.mo ``` -In this example, when the counter is upgraded, its state is lost. +Importantly, in this example, when the counter is upgraded, its state is lost. +This is because actor variables are by default `flexible`, meaning they get reinitialized on an upgrade. To fix this, you can declare a stable variable that is retained across upgrades: @@ -31,52 +32,40 @@ To fix this, you can declare a stable variable that is retained across upgrades: If the variable `state` were not declared `stable`, `state` would restart from `0` on upgrade. +## Evolving the stable declarations -## Evolving the Candid interface - -In this extension of our interface, old clients remain satisfied, while new ones get extra features such as the `read` query in this example. +Changing counter from `Nat` to `Int` is a compatible change in stable declarations. The counter value is retained during the upgrade. ``` motoko no-repl file=../examples/count-v2.mo ``` -## Changing the stable interface - -Let's take a look at an example where the counter is refactored from using [`Int`](../base/Int.md) to [`Nat`](../base/Nat.md). - -``` motoko no-repl file=../examples/count-v3.mo -``` - -Now, the code has been upgraded, but the counter value is back to `0`. The state was lost in an upgrade. - -This is because the Candid interface evolved safely​ but the stable types did not. - -An upgrade must be able to: - -- Consume any stable variable value from its predecessor, or - -- Run the initializer for a new stable variable. - -Since `Int `: Checks two `.most` files for upgrade compatibility. -To upgrade from `cur.wasm` to `nxt.wasm` we need check that both the Candid interface and stable variables are compatible. 
+Motoko embeds `.did` and `.most` files as Wasm custom sections for use by `dfx` or other tools. + +To upgrade e.g. from `cur.wasm` to `nxt.wasm`, `dfx` checks that both the Candid interface and stable variables are compatible: ``` didc check nxt.did cur.did // nxt <: cur moc --stable-compatible cur.most nxt.most // cur <<: nxt ``` -Using the versions above, the upgrade from `v2` to `v3` fails this check: +Using the versions above, the upgrade from `v3` to `v4` fails this check: ``` -> moc --stable-compatible v2.most v3.most +> moc --stable-compatible v3.most v4.most (unknown location): Compatibility error [M0170], stable variable state of previous type var Int cannot be consumed at new type - var Nat + var Float ``` -Because of the compatibility error, you should not attempt to upgrade from `v2.wasm` to `v3.wasm`. The result of upgrading is unpredictable. At best, the upgrade will detect the incompatibility, trap and roll back to the current version, as if the upgrade had never been attempted. At worst, the upgrade will appear to succeed, but lose some or all of the state of the previous version, re-initializing some of the stable variables you intended to preserve. +With [enhanced orthogonal persistence](orthogonal-persistence/enhanced.md), compatibility errors of stable variables are always detected in the runtime system and if failing, the upgrade is safely rolled back. -One way to correctly change the logical state to [`Nat`](../base/Nat.md), is to introduce a new stable variable, `newState`, of type [`Nat`](../base/Nat.md), initialized from the old one (`state`). Unlike the stable signature of v3.wasm, the stable signature of v4.wasm: +:::danger +With [classical orthogonal persistence](orthogonal-persistence/classical.md), however, an upgrade attempt from `v2.wasm` to `v3.wasm` is unpredictable and may lead to partial or complete data loss, if the `dfx` warning is ignored. 
+::: -``` motoko no-repl file=../examples/count-v4.mo -``` - -``` motoko no-repl file=../examples/count-v4.most -``` - -## Incompatible upgrade example +## Adding record fields A common, real-world example of an incompatible upgrade can be found [on the forum](https://forum.dfinity.org/t/questions-about-data-structures-and-migrations/822/12?u=claudio/). In that example, a user was attempting to add a field to the record payload of an array, by upgrading from stable type interface: ``` motoko no-repl -type Card = { - title : Text -}; actor { - stable var map: [(Nat32, Card)] -} + type Card = { + title : Text; + }; + stable var map : [(Nat32, Card)] = [(0, { title = "TEST"})]; +}; ``` to *incompatible* stable type interface: ``` motoko no-repl -type Card = { - title : Text; - description : Text +actor { + type Card = { + title : Text; + description : Text; + }; + stable var map : [(Nat32, Card)] = []; }; +``` + +### Problem + +When trying this upgrade, `dfx` issues the following warning: + +``` +Stable interface compatibility check issued an ERROR for canister ... +Upgrade will either FAIL or LOSE some stable variable data. + +(unknown location): Compatibility error [M0170], stable variable map of previous type + var [(Nat32, Card)] +cannot be consumed at new type + var [(Nat32, Card__1)] + +Do you want to proceed? yes/No +``` +It is recommended not to continue, as you will lose the state in older versions of Motoko that use [classical orthogonal persistence](orthogonal-persistence/classical.md). +Upgrading with [enhanced orthogonal persistence](orthogonal-persistence/enhanced.md) will trap and roll back, keeping the old state. + +Adding a new record field to the type of existing stable variable is not supported. The reason is simple: The upgrade would need to supply values for the new field out of thin air. In this example, the upgrade would need to conjure up some value for the `description` field of every existing `card` in `map`. 
Moreover, allowing the addition of optional fields is also a problem, as a record can be shared from various variables with different static types, some of them already declaring the added field or adding a same-named optional field with a potentially different type (and/or different semantics). + +### Solution + +To resolve this issue, again, an [explicit migration](#explicit-migration) is needed: + +1. We keep the old variable `map` with the same structural type. However, we are allowed to change the type alias name (`Card` to `OldCard`). +2. We introduce a new variable `newMap` and copy the old state to the new one, initializing the new field as needed. +3. We upgrade to this new version. + +``` motoko no-repl +import Array "mo:base/Array"; + actor { - stable var map : [(Nat32, Card)] -} + type OldCard = { + title : Text; + }; + type NewCard = { + title : Text; + description : Text; + }; + + stable var map : [(Nat32, OldCard)] = []; + stable var newMap : [(Nat32, NewCard)] = Array.map<(Nat32, OldCard), (Nat32, NewCard)>( + map, + func(key, { title }) { (key, { title; description = "" }) }, + ); +}; ``` -Adding a new record field does not work. The reason is simple: the upgrade would need to supply values for the new field out of thin air. In this example, the upgrade would need to conjure up some value for the `description` field of every existing `card` in `map`. +4. **After** we have successfully upgraded to this new version, we can upgrade once more to a version that drops the old `map`. -## Metadata sections +``` motoko no-repl +actor { + type Card = { + title : Text; + description : Text; + }; + stable var newMap : [(Nat32, Card)] = []; +}; ``` -Motoko embeds `.did` and `.most` files as Wasm custom sections for use by other tools, e.g. dfx. +`dfx` will issue a warning that `map` will be dropped.
-`dfx deploy` and `dfx canister install --all --mode upgrade` commands check that the interface is compatible, and if not, show this message and ask if you want to continue: +Make sure, you have previously migrated the old state to `newMap` before applying this final reduced version. ``` -let msg = format!("Candid interface compatibility check failed for canister '{}'.\nYou are making a BREAKING change. Other canisters or frontend clients relying on your canister may stop working.\n\n", canister_info.get_name()) + &err; +Stable interface compatibility check issued a WARNING for canister ... +(unknown location): warning [M0169], stable variable map of previous type + var [(Nat32, OldCard)] + will be discarded. This may cause data loss. Are you sure? ``` Logo diff --git a/doc/md/canister-maintenance/orthogonal-persistence/_category_.yml b/doc/md/canister-maintenance/orthogonal-persistence/_category_.yml new file mode 100644 index 00000000000..cb2f5e1e941 --- /dev/null +++ b/doc/md/canister-maintenance/orthogonal-persistence/_category_.yml @@ -0,0 +1,4 @@ +position: 5 +label: 'Orthogonal persistence' +collapsible: true # make the category collapsible +collapsed: true diff --git a/doc/md/canister-maintenance/orthogonal-persistence/classical.md b/doc/md/canister-maintenance/orthogonal-persistence/classical.md new file mode 100644 index 00000000000..fe14a8c8a5f --- /dev/null +++ b/doc/md/canister-maintenance/orthogonal-persistence/classical.md @@ -0,0 +1,23 @@ +--- +sidebar_position: 3 +--- + +# Classical orthogonal persistence + +Classical orthogonal persistence is the old implementation of Motoko's orthogonal persistence. It is currently still the default option, as enhanced orthogonal persistence is in beta-testing stage. + +On an upgrade, the classical orthogonal persistence mechanism serializes all stable data to the stable memory and then again deserializes it back to the main memory. 
This has several downsides: + +* At maximum, 2 GB of heap data can be persisted across upgrades. This is because of an implementation restriction, first copying the stable data back in main memory and then deserializing it from there. Note that in practice, the supported amount of stable data can be way lower. +* Shared immutable heap objects can be duplicated, leading to potential state explosion on upgrades. +* Deeply nested structures can lead to a call stack overflow. +* The serialization and deserialization is expensive and can hit the IC's instruction limits. +* There is no inbuilt stable compatibility check in the runtime system. If users ignore the `dfx` upgrade warning, data may be lost or the upgrade fails. + +:::danger +All these issues can lead to a stuck canister that can no longer be upgraded. +It is therefore absolutely necessary to thoroughly test how much data an upgrade of your application can handle and then conservatively limit the data held by that canister. +Moreover, it is good to have a backup possibility to rescue data even if upgrades fail, e.g. by controller-privileged data query calls. +::: + +These issues will be solved by [enhanced orthogonal persistence](enhanced.md). diff --git a/doc/md/canister-maintenance/orthogonal-persistence/enhanced.md b/doc/md/canister-maintenance/orthogonal-persistence/enhanced.md new file mode 100644 index 00000000000..5b46305e6bd --- /dev/null +++ b/doc/md/canister-maintenance/orthogonal-persistence/enhanced.md @@ -0,0 +1,101 @@ +--- +sidebar_position: 2 +--- + +# Enhanced orthogonal persistence + +This implements the vision of efficient and scalable orthogonal persistence in Motoko that combines: +* **Stable heap**: Persisting the program main memory across canister upgrades. +* **64-bit heap**: Extending the main memory to 64-bit for large-scale persistence.
+ +As a result, the use of secondary storage (explicit stable memory, dedicated stable data structures, DB-like storage abstractions) will no longer be necessary: Motoko developers can directly work on their normal object-oriented program structures that are automatically persisted and retained across program version changes. + +### Activation +Enhanced orthogonal persistence is currently offered for **beta testing** via the compiler flag `--enhanced-orthogonal-persistence`. + +To activate enhanced orthogonal persistence under `dfx`, the following command-line argument needs to be specified in `dfx.json`: + +``` +... + "type" : "motoko" + ... + "args" : "--enhanced-orthogonal-persistence" +... +``` + +:::tip +Despite the use of enhanced orthogonal persistence, it is strongly recommended to thoroughly test the upgrades of your application. +Moreover, it is advised to have a backup possibility for rescuing data even when upgrades fail, e.g. by controller-privileged data query calls. +::: + +[Classical orthogonal persistence](classical.md) with 32-bit main memory and Candid stabilization currently remains the default mode. +See [orthogonal persistence modes](modes.md) for a comparison. + +## Advantages +Compared to the existing orthogonal persistence in Motoko, this design offers: +* **Performance**: New program versions directly resume from the existing main memory and have access to the memory-compatible data. +* **Scalability**: The upgrade mechanism scales with larger heaps and in contrast to serialization, does not hit IC instruction limits. + +Compared to the explicit use of stable memory, this design improves: +* **Simplicity**: Developers do not need to deal with explicit stable memory. +* **Performance**: No copying to and from the separate stable memory is necessary. + +## Design +The enhanced orthogonal persistence is based on the following main properties: +* Extension of the IC to retain main memory on upgrades. +* Supporting 64-bit main memory on the IC.
+* A long-term memory layout that is invariant to new compiled program versions. +* A fast memory compatibility check performed on each canister upgrade. +* Incremental garbage collection using a partitioned heap. + +### Compatibility Check +Upgrades are only permitted if the new program version is compatible with the old version, such that the runtime system guarantees a compatible memory structure. + +Compatible changes for immutable types are largely analogous to the allowed Motoko subtype relation modulo some flexibility for actor fields, i.e. +* Adding or removing actor fields. +* Changing mutability of actor fields (`let` to `var` and vice-versa). +* Removing object fields. +* Adding variant fields. +* Changing `Nat` to `Int`. +* Supporting shared function parameter contravariance and return type covariance. +* Any other change according to Motoko's subtyping rule. + +The runtime system checks migration compatibility on upgrade, and if not fulfilled, rolls back the upgrade. This compatibility check serves as an additional safety measure on top of the `dfx` warning that can be bypassed by users. + +Any more complex change can be performed with programmatic instruction, see [explicit migration](../upgrades.md#explicit-migration). + +### Migration Path +When migrating from the old serialization-based stabilization to the new persistent heap, the old data is deserialized one last time from stable memory and then placed in the new persistent heap layout. Once operating on the persistent heap, the system should prevent downgrade attempts to the old serialization-based persistence. + +#### Graph-Copy-Based Stabilization +Assuming that the persistent memory layout needs to be changed in the future, the runtime system supports serialization and deserialization to and from stable memory in a defined data format using graph-copy-based stabilization.
Arbitrarily large data can be serialized and deserialized beyond the instruction and working set limit of upgrades: Large data serialization and deserialization is split in multiple messages, running before and/or after the IC upgrade to migrate large heaps. Of course, other messages will be blocked during this process and only the canister owner or the canister controllers are permitted to initiate this process. + +This will only be needed in rare situation when Motoko's implementation changes its internal memory layout. Users will then be instructed to explicitly initiate this migration. + +#### Usage +Graph-copy-based stabilization can be performed in three steps: + +1. Initiate the explicit stabilization before the upgrade: + +``` +dfx canister call CANISTER_ID __motoko_stabilize_before_upgrade "()" +``` + +2. Run the actual upgrade: + +``` +dfx deploy CANISTER_ID +``` + +3. Complete the explicit destabilization after the upgrade: + +``` +dfx canister call CANISTER_ID __motoko_destabilize_after_upgrade "()" +``` + +Remarks: +* When receiving the `dfx` error "The request timed out." during explicit stabilization, upgrade, or destabilization, one can simply repeat the call until it completes. +* Steps 3 (explicit destabilization) may not be needed if the corresponding operation fits into the upgrade message. + +### Old Stable Memory +The old stable memory remains equally accessible as secondary (legacy) memory with the new support. Therefore, stable regions can be combined with orthogonal persistence. 
diff --git a/doc/md/canister-maintenance/orthogonal-persistence/modes.md b/doc/md/canister-maintenance/orthogonal-persistence/modes.md new file mode 100644 index 00000000000..0af9f7b680f --- /dev/null +++ b/doc/md/canister-maintenance/orthogonal-persistence/modes.md @@ -0,0 +1,11 @@ +--- +sidebar_position: 1 +--- + +# Persistence modes + +Motoko features two implementations for orthogonal persistence: + +* [Enhanced orthogonal persistence](enhanced.md), currently in beta stage, provides very fast upgrades, scaling independently of the heap size: This is realized by retaining the Wasm entire main memory on an upgrade and simply performing a type-driven upgrade safety check. By using 64-bit address space, it is designed to scale beyond 4 GB and in future, offer the same capacity like stable memory. + +* [Classical orthogonal persistence](classical.md) is the old implementation of orthogonal persistence that will be superseded by enhanced orthogonal persistence. On upgrade, the runtime system first serializes the persistent data to stable memory and then deserializes it back again to main memory. While this is both inefficient and unscalable, it exhibits problems on shared immutable data (potentially leading to state explosion), deep structures (call stack overflow) and larger heaps (the implementation limits the stable data to at most 2 GB). diff --git a/doc/md/canister-maintenance/upgrades.md b/doc/md/canister-maintenance/upgrades.md index 340cada8d1a..5e1d89e3bf1 100644 --- a/doc/md/canister-maintenance/upgrades.md +++ b/doc/md/canister-maintenance/upgrades.md @@ -6,21 +6,19 @@ sidebar_position: 3 ## Overview -One key feature of ICP is its ability to persist canister state using WebAssembly memory and globals rather than a traditional database. This means that the entire state of a canister is restored before and saved after each message, without explicit user instruction. 
This automatic and user-transparent preservation of state between messages is called **orthogonal persistence**. +One key feature of Motoko is its ability to automatically persist the program's state without explicit user instruction, called **orthogonal persistence**. This not only covers persistence across transactions but also includes canister upgrades. For this purpose, Motoko features a bespoke compiler and runtime system that manages upgrades in a sophisticated way such that a new program version can pick up the state left behind by a previous program version. As a result, Motoko data persistence is not only simple, but also prevents data corruption and loss and is efficient at the same time. No database, stable memory API, or stable data structure is required to retain state across upgrades. Instead, a simple `stable` keyword is sufficient to declare a data structure of arbitrary shape persistent, even if the structure uses sharing, has a deep complexity, or contains cycles. -Though convenient, orthogonal persistence poses a challenge when it comes to upgrading the code of a canister. Without an explicit representation of the canister’s state, how does one transfer any application data from the retired canister to its replacement? For example, if you want to deploy a new version of a user registration canister to fix an issue or add functionality, you need to ensure that existing registrations survive the upgrade process. - -Accommodating upgrades without data loss requires some new facility to **migrate** a canister’s crucial data to the upgraded canister. ICP's persistence model allows a canister to save and restore long-lived data to dedicated **stable memory** that, unlike ordinary canister memory, is not cleared but retained across an upgrade. This facility allows a canister to transfer data in bulk to its replacement canister, provided that data is placed in stable memory, either throughout its lifetime, or just before an upgrade.
- -Motoko provides high-level support for preserving state that leverages stable memory. This feature, called **stable storage**, is designed to accommodate changes to both the application data and the Motoko compiler used to produce the application code. - -Utilizing stable storage depends on the developer anticipating and indicating the data to retain after an upgrade. Depending on the application, the data you decide to persist might be some, all, or none of a given actor’s state. +This is substantially different to other languages supported on the IC, which use off-the-shelf language implementations that are not designed with orthogonal persistence in mind: They rearrange memory structures in an uncontrolled manner on re-compilation or at runtime. As an alternative, in other languages, programmers have to explicitly use stable memory or special stable data structures to rescue their data between upgrades. Contrary to Motoko, this approach is not only cumbersome, but also unsafe and inefficient. Compared to using stable data structures, Motoko's orthogonal persistence allows more natural data modeling and significantly faster data accesses, eventually resulting in way more efficient programs. ## Declaring stable variables -In an actor, you can configure a variable to use stable storage through the `stable` keyword modifier in the variable’s declaration. +In an actor, you can configure which part of the program is considered to be persistent, i.e. survives upgrades, and which parts are ephemeral, i.e. are reset on upgrades. + +More precisely, each `let` and `var` variable declaration in an actor can specify whether the variable is `stable` or `flexible`. If you don’t provide a modifier, the variable is assumed to be `flexible` by default. -More precisely, every `let` and `var` variable declaration in an actor can specify whether the variable is `stable` or `flexible`. If you don’t provide a modifier, the variable is declared as `flexible` by default.
+The semantics of the modifiers is as follows: +* `stable` means that all values directly or indirectly reachable from that stable actor variable are considered persistent and automatically retained across upgrades. This is the primary choice for most of the program's state. +* `flexible` means that the variable is re-initialized on upgrade, such that the values referenced by this flexible variable can be discarded, unless the values are transitively reachable by other variables that are stable. `flexible` is only used for temporal state or references to high-order types, such as local function references, see [stable types](#stable-types). The following is a simple example of how to declare a stable counter that can be upgraded while preserving the counter’s value: @@ -35,41 +33,23 @@ You can only use the `stable` or `flexible` modifier on `let` and `var` declarat When you first compile and deploy a canister, all flexible and stable variables in the actor are initialized in sequence. When you deploy a canister using the `upgrade` mode, all stable variables that existed in the previous version of the actor are pre-initialized with their old values. After the stable variables are initialized with their previous values, the remaining flexible and newly-added stable variables are initialized in sequence. -## Preupgrade and postupgrade system methods - -Declaring a variable to be `stable` requires its type to be stable too. Since not all types are stable, some variables cannot be declared `stable`. - -As a simple example, consider the following `Registry` actor: - -``` motoko file=../examples/Registry.mo -``` - -This actor assigns sequential identifiers to [`Text`](../base/Text.md) values, using the size of the underlying `map` object to determine the next identifier. Like other actors, it relies on orthogonal persistence to maintain the state of the hashmap between calls. 
- -This example would like to make the `Register` upgradable without the upgrade losing any existing registrations, but its state, `map`, has a proper object type that contains member functions, so the `map` variable cannot be declared `stable`. - -For scenarios like this that can’t be solved using stable variables alone, Motoko supports user-defined upgrade hooks that run immediately before and after an upgrade. These upgrade hooks allow you to migrate state between unrestricted flexible variables to more restricted stable variables. These hooks are declared as `system` functions with special names, `preugrade` and `postupgrade`. Both functions must have type `: () → ()`. - -The `preupgrade` method lets you make a final update to stable variables before the runtime commits their values to stable memory and performs an upgrade. The `postupgrade` method is run after an upgrade has initialized the replacement actor, including its stable variables, but before executing any shared function call or message on that actor. - -The following example introduces a new stable variable, `entries`, to save and restore the entries of the unstable hash table: - -``` motoko file=../examples/StableRegistry.mo -``` +:::danger +Do not forget to declare variables `stable` if they should survive canister upgrades as the default is `flexible` if no modifier is declared. +::: -Note that the type of `entries`, being an array of [`Text`](../base/Text.md) and [`Nat`](../base/Nat.md) pairs, is indeed a stable type. +## Persistence modes -In this example, the `preupgrade` system method writes the current `map` entries to `entries` before `entries` is saved to stable memory. The `postupgrade` system method resets `entries` to the empty array after `map` has been populated from `entries`. +Motoko currently features two implementations for orthogonal persistence, see [persistence modes](orthogonal-persistence/modes.md). 
-## Typing +## Stable types Because the compiler must ensure that stable variables are both compatible with and meaningful in the replacement program after an upgrade, every `stable` variable must have a stable type. A type is stable if the type obtained by ignoring any `var` modifiers within it is shared. -The only difference between stable types and shared types is the former’s support for mutation. Like shared types, stable types are restricted to first-order data, excluding local functions and structures built from local functions, such as objects. This exclusion of functions is required because the meaning of a function value, consisting of both data and code, cannot easily be preserved across an upgrade. The meaning of plain data, mutable or not, can be. +The only difference between stable types and shared types is the former’s support for mutation. Like shared types, stable types are restricted to first-order data, excluding local functions and structures built from local functions (such as class instances). This exclusion of functions is required because the meaning of a function value, consisting of both data and code, cannot easily be preserved across an upgrade. The meaning of plain data, mutable or not, can be. :::note -In general, object types are not stable because they can contain local functions. However, a plain record of stable data is a special case of object types that are stable. Moreover, references to actors and shared functions are also stable, allowing you to preserve their values across upgrades. For example, you can preserve the state record of a set of actors or shared function callbacks subscribing to a service. +In general, classes are not stable because they can contain local functions. However, a plain record of stable data is a special case of object types that are stable. Moreover, references to actors and shared functions are also stable, allowing you to preserve their values across upgrades. 
For example, you can preserve the state record of a set of actors or shared function callbacks subscribing to a service. ::: @@ -83,39 +63,39 @@ For variables that do not have a stable type, there are two options for making t - [StableHashMap](https://github.com/canscale/StableHashMap) - [StableRBTree](https://github.com/canscale/StableRBTree) -2. You can convert the variable to another type that is stable, such as the following example that converts a `Buffer` to an `Array`: - -```motoko -let theProjectsBuffer : Buffer.Buffer = Buffer.Buffer(theProjectsNew.size()); - -for (x in theProjectsNew.vals()) { - -theProjectsBuffer.add(x); - -}; - -theProjectsBuffer.add(newProject); +:::note +Unlike stable data structures in the Rust CDK, these modules do not use stable memory but rely on orthogonal persistence. The adjective "stable" only denotes a stable type in Motoko. +::: -return theProjectsBuffer.toArray(); -``` +2. Extract the state in a stable type, and wrap it in the non-stable type. -Here is another example that takes a `HashMap` type and creates a stable `Array`: +For example, the stable type `TemperatureSeries` covers the persistent data, while the non-stable type `Weather` wraps this with additional methods (local function types). 
```motoko -private var canisters: HashMap.HashMap = HashMap.HashMap(10, isPrincipalEqual, Principal.hash); - -private stable var upgradeCanisters : [(Principal, CanisterId)] = []; - -system func preupgrade() { - upgradeCanisters := Iter.toArray(canisters.entries()); +actor { + type TemperatureSeries = [Float]; + + class Weather(temperatures : TemperatureSeries) { + public func averageTemperature() : Float { + var sum = 0.0; + var count = 0.0; + for (value in temperatures.vals()) { + sum += value; + count += 1; + }; + return sum / count; }; + }; - system func postupgrade() { - canisters := HashMap.fromIter(upgradeCanisters.vals(), 10, isPrincipalEqual, Principal.hash); - upgradeCanisters := []; - }; + stable var temperatures : TemperatureSeries = [30.0, 31.5, 29.2]; + flexible var weather = Weather(temperatures); +}; ``` +3. __Not recommended__: [Pre- and post-upgrade hooks](#preupgrade-and-postupgrade-system-methods) allow to copy non-stable types to stable types during upgrades. +The downside of this approach is that it is error-prone and does not scale for large data. +Conceptually, it also does not align well with the idea of orthogonal persistence. + ## Stable type signatures The collection of stable variable declarations in an actor can be summarized in a stable signature. @@ -138,13 +118,14 @@ You can emit the stable signature of the main actor or actor class to a `.most` ::: -A stable signature `` is stable-compatible with signature ``, if: +A stable signature `` is stable-compatible with signature ``, if for each stable field ` : T` in `` one of the following conditions hold: -- Every immutable field `stable : T` in `` has a matching field `stable : U` in `` with `T <: U`. +- `` does not contain a stable field ``. +- `` has a matchng stable field ` : U` with `T <: U`. -- Every mutable field `stable var : T` in `` has a matching field `stable var : U` in `` with `T <: U`. +Note that `` may contain additional fields or abandon fields of ``. 
Mutability can be different for matching fields. -Note that `` may contain additional fields. Typically, `` is the signature of an older version while `` is the signature of a newer version. +`` is the signature of an older version while `` is the signature of a newer version. The subtyping condition on stable fields ensures that the final value of some field can be consumed as the initial value of that field in the upgraded code. @@ -154,27 +135,25 @@ You can check the stable-compatibility of two `.most` files containing stable si ::: -:::note - -The stable-compatible relation is quite conservative. In the future, it may be relaxed to accommodate a change in field mutability and/or abandoning fields from `` but with a warning. - -::: ## Upgrade safety -Before upgrading a deployed canister, you should ensure that the upgrade is safe and will not: +When upgrading a canister, it is important to verify that the upgrade can proceed without: -- Break existing clients due to a Candid interface change. +- Introducing an incompatible change in stable declarations. +- Breaking clients due to a Candid interface change. -- Discard Motoko stable state due to an incompatible change in stable declarations. +With [enhanced orthogonal persistence](orthogonal-persistence/enhanced.md), Motoko rejects incompatible changes of stable declarations during an upgrade attempt. +Moreover, `dfx` checks the two conditions before attempting the upgrade and warns users correspondingly. A Motoko canister upgrade is safe provided: -- The canister’s Candid interface evolves to a Candid subtype. - +- The canister’s Candid interface evolves to a Candid subtype. - The canister’s Motoko stable signature evolves to a stable-compatible one. -Upgrade safety does not guarantee that the upgrade process will succeed, as it can still fail due to resource constraints.
However, it should at least ensure that a successful upgrade will not break Candid type compatibility with existing clients or unexpectedly lose data that was marked `stable`. +:::danger +With [classical orthogonal persistence](orthogonal-persistence/classical.md), the upgrade can still fail due to resource constraints. This is problematic as the canister can then not be upgraded. It is therefore strongly advised to test the scalability of upgrades well. Enhanced orthogonal persistence will abandon this issue. +::: :::tip @@ -182,21 +161,71 @@ You can check valid Candid subtyping between two services described in `.did` fi ::: +## Upgrading a deployed actor or canister + +After you have deployed a Motoko actor with the appropriate `stable` variables, you can use the `dfx deploy` command to upgrade an already deployed version. For information about upgrading a deployed canister, see [upgrade a canister smart contract](/docs/current/developer-docs/smart-contracts/maintain/upgrade). -## Metadata sections +`dfx deploy` checks that the interface is compatible, and if not, show this message and ask if you want to continue: -The Motoko compiler embeds the Candid interface and stable signature of a canister as canister metadata, recorded in additional Wasm custom sections of a compiled binary. +``` +You are making a BREAKING change. Other canisters or frontend clients relying on your canister may stop working. +``` -This metadata can be selectively exposed by ICP and used by tools such as `dfx` to verify upgrade compatibility. +In addition, Motoko with enhanced orthogonal persistence implements extra safe guard in the runtime system to ensure that the stable data is compatible, to exclude any data corruption or misinterpretation. Moreover, `dfx` also warns about dropping of stable variables. 
-## Upgrading a deployed actor or canister +## Data migration -After you have deployed a Motoko actor with the appropriate `stable` variables or `preupgrade` and `postupgrade` system methods, you can use the `dfx canister install` command with the `--mode=upgrade` option to upgrade an already deployed version. For information about upgrading a deployed canister, see [upgrade a canister smart contract](/docs/current/developer-docs/smart-contracts/maintain/upgrade). +Often, the data representation changes with a new program version. For orthogonal persistence, it is important the language is able to allow flexible data migration to the new version. -`dfx canister install --mode=upgrade` checks that the interface is compatible, and if not, show this message and ask if you want to continue: +Motoko supports two kinds of data migrations: Implicit migration and explicit migration. -``` -let msg = format!("Candid interface compatibility check failed for canister '{}'.\nYou are making a BREAKING change. Other canisters or frontend clients relying on your canister may stop working.\n\n", canister_info.get_name()) + &err; -``` +### Implicit migration + +This is automatically supported when the new program version is stable-compatible with the old version. The runtime system of Motoko then automatically handles the migration on upgrade. + +More precisely, the following changes can be implicitly migrated: +* Adding or removing actor fields. +* Changing mutability of the actor field. +* Removing record fields. +* Adding variant fields. +* Changing `Nat` to `Int`. +* Shared function parameter contravariance and return type covariance. +* Any change that is allowed by the Motoko's subtyping rule. + +### Explicit migration + +Any more complex migration is possible by user-defined functionality. + +For this purpose, a three step approach is taken: +1. Introduce new variables of the desired types, while keeping the old declarations. +2. 
Write logic to copy the state from the old variables to the new variables on upgrade. +3. Drop the old declarations, once all data has been migrated. + +For more information, see the [example of explicit migration](compatibility.md#explicit-migration). + +## Legacy features + +The following aspects are retained for historical reasons and backwards compatibility: + +### Preupgrade and postupgrade system methods + +This is advanced functionality that is not recommended for standard cases, as it is error-prone and can render the canister unusable. + +Motoko supports user-defined upgrade hooks that run immediately before and after an upgrade. These upgrade hooks allow triggering additional logic on upgrade. +These hooks are declared as `system` functions with special names, `preupgrade` and `postupgrade`. Both functions must have type `: () → ()`. + +:::danger +If `preupgrade` raises a trap or hits the instruction limit or another IC computing limit, the upgrade can no longer succeed and the canister is stuck with the existing version. +::: + +:::tip +`postupgrade` is not needed as the same effect can be achieved by introducing initializing expressions in the actor, e.g. non-stable let expressions or expression statements. +::: + +### Stable Memory and stable regions + +Stable memory was introduced on the IC to allow upgrades in languages that do not implement orthogonal persistence of the main memory. This is the case with Motoko's classical persistence as well as other languages besides Motoko. + +Stable memory and stable regions can still be used in combination with orthogonal persistence, although there is little practical need for this with enhanced orthogonal persistence and the future large main memory capacity on the IC.
Logo diff --git a/doc/md/examples/count-v0.did b/doc/md/examples/count-v0.did index f743214c13b..c4b13980af3 100644 --- a/doc/md/examples/count-v0.did +++ b/doc/md/examples/count-v0.did @@ -1,3 +1,3 @@ service : { - inc: () -> (int); -} + increment : () -> (); +}; diff --git a/doc/md/examples/count-v0.mo b/doc/md/examples/count-v0.mo index 5981d106d44..62ab0849f25 100644 --- a/doc/md/examples/count-v0.mo +++ b/doc/md/examples/count-v0.mo @@ -1,10 +1,10 @@ -actor Counter_v0 { +import Debug "mo:base/Debug"; - var state : Int = 0; +actor Counter_v0 { + var state : Nat = 0; - public func inc() : async Int { + public func increment() : async () { state += 1; - return state; + Debug.print(debug_show (state)); }; - -} +}; diff --git a/doc/md/examples/count-v0.most b/doc/md/examples/count-v0.most index a1802043bdc..1fe45ff42ed 100644 --- a/doc/md/examples/count-v0.most +++ b/doc/md/examples/count-v0.most @@ -1,3 +1,2 @@ -actor { - +actor { }; diff --git a/doc/md/examples/count-v1.did b/doc/md/examples/count-v1.did index f743214c13b..c4b13980af3 100644 --- a/doc/md/examples/count-v1.did +++ b/doc/md/examples/count-v1.did @@ -1,3 +1,3 @@ service : { - inc: () -> (int); -} + increment : () -> (); +}; diff --git a/doc/md/examples/count-v1.mo b/doc/md/examples/count-v1.mo index 2cc2360632d..b75935bb314 100644 --- a/doc/md/examples/count-v1.mo +++ b/doc/md/examples/count-v1.mo @@ -1,9 +1,10 @@ -actor Counter_v1 { +import Debug "mo:base/Debug"; - stable var state : Int = 0; +actor Counter_v1 { + stable var state : Nat = 0; - public func inc() : async Int { + public func increment() : async () { state += 1; - return state; + Debug.print(debug_show (state)); }; -} +}; diff --git a/doc/md/examples/count-v1.most b/doc/md/examples/count-v1.most index 31801da83fa..302a837fa10 100644 --- a/doc/md/examples/count-v1.most +++ b/doc/md/examples/count-v1.most @@ -1,3 +1,3 @@ actor { - stable var state : Int + stable var state : Nat }; diff --git a/doc/md/examples/count-v2.did 
b/doc/md/examples/count-v2.did index 57e9012e2bb..c4b13980af3 100644 --- a/doc/md/examples/count-v2.did +++ b/doc/md/examples/count-v2.did @@ -1,4 +1,3 @@ service : { - inc: () -> (int); - read: () -> (int) query; -} + increment : () -> (); +}; diff --git a/doc/md/examples/count-v2.mo b/doc/md/examples/count-v2.mo index 95c29027c77..cc288c81b86 100644 --- a/doc/md/examples/count-v2.mo +++ b/doc/md/examples/count-v2.mo @@ -1,11 +1,10 @@ -actor Counter_v2 { +import Debug "mo:base/Debug"; +actor Counter_v2 { stable var state : Int = 0; - public func inc() : async Int { + public func increment() : async () { state += 1; - return state; + Debug.print(debug_show (state)); }; - - public query func read() : async Int { return state; } -} +}; diff --git a/doc/md/examples/count-v3.did b/doc/md/examples/count-v3.did index 902cf3b1469..cef4206e459 100644 --- a/doc/md/examples/count-v3.did +++ b/doc/md/examples/count-v3.did @@ -1,4 +1,5 @@ service : { - inc: () -> (nat); - read: () -> (nat) query; -} + increment : () -> (); + decrement : () -> (); + read : () -> (int) query; +}; diff --git a/doc/md/examples/count-v3.mo b/doc/md/examples/count-v3.mo index e9cb87d5c1d..aba6e495f3f 100644 --- a/doc/md/examples/count-v3.mo +++ b/doc/md/examples/count-v3.mo @@ -1,11 +1,15 @@ actor Counter_v3 { + stable var state : Int = 0; - stable var state : Nat = 0; - - public func inc() : async Nat { + public func increment() : async () { state += 1; - return state; }; - public query func read() : async Nat { return state; } -} + public func decrement() : async () { + state -= 1; + }; + + public query func read() : async Int { + return state; + }; +}; diff --git a/doc/md/examples/count-v3.most b/doc/md/examples/count-v3.most index 302a837fa10..31801da83fa 100644 --- a/doc/md/examples/count-v3.most +++ b/doc/md/examples/count-v3.most @@ -1,3 +1,3 @@ actor { - stable var state : Nat + stable var state : Int }; diff --git a/doc/md/examples/count-v4.mo b/doc/md/examples/count-v4.mo index 
f12e2fba784..22234fc879a 100644 --- a/doc/md/examples/count-v4.mo +++ b/doc/md/examples/count-v4.mo @@ -1,14 +1,17 @@ -import Int "mo:base/Int"; +import Float "mo:base/Float"; actor Counter_v4 { + stable var state : Float = 0.0; - stable var state : Int = 0; - stable var newState : Nat = Int.abs(state); + public func increment() : async () { + state += 0.5; + }; - public func inc() : async Nat { - newState += 1; - return newState; + public func decrement() : async () { + state -= 0.5; }; - public query func read() : async Nat { return newState; } -} + public query func read() : async Float { + return state; + }; +}; diff --git a/doc/md/examples/count-v5.mo b/doc/md/examples/count-v5.mo new file mode 100644 index 00000000000..58843e7b29f --- /dev/null +++ b/doc/md/examples/count-v5.mo @@ -0,0 +1,23 @@ +import Debug "mo:base/Debug"; +import Float "mo:base/Float"; + +actor Counter_v5 { + stable var state : Int = 0; + stable var newState : Float = Float.fromInt(state); + + public func increment() : async () { + newState += 0.5; + }; + + public func decrement() : async () { + newState -= 0.5; + }; + + public query func read() : async Int { + Debug.trap("No longer supported: Use `readFloat`"); + }; + + public query func readFloat() : async Float { + return newState; + }; +}; diff --git a/doc/md/examples/count-v6.mo b/doc/md/examples/count-v6.mo new file mode 100644 index 00000000000..e0c6f0df284 --- /dev/null +++ b/doc/md/examples/count-v6.mo @@ -0,0 +1,22 @@ +import Debug "mo:base/Debug"; +import Float "mo:base/Float"; + +actor Counter_v6 { + stable var newState : Float = 0.0; + + public func increment() : async () { + newState += 0.5; + }; + + public func decrement() : async () { + newState -= 0.5; + }; + + public query func read() : async Int { + Debug.trap("No longer supported: Use `readFloat`"); + }; + + public query func readFloat() : async Float { + return newState; + }; +}; diff --git a/doc/md/getting-started/motoko-introduction.md 
b/doc/md/getting-started/motoko-introduction.md index b2dd2f28a69..b4d450cade0 100644 --- a/doc/md/getting-started/motoko-introduction.md +++ b/doc/md/getting-started/motoko-introduction.md @@ -18,6 +18,8 @@ Motoko provides: - An **actor-based** programming model optimized for efficient message handling. +- **Orthogonal persistence** for simple, safe, and efficient data storage without a database or secondary memory. + - An interpreter and compiler that you can use to test and compile the WebAssembly code for autonomous applications. # Why Motoko? @@ -87,9 +89,7 @@ Motoko provides numerous features to help you leverage orthogonal persistence, i For example, Motoko lets you declare certain variables as `stable`. The values of `stable` variables are automatically preserved across canister upgrades. -Once upgraded, the new interface is compatible with the previous one, meaning existing clients referencing the canister will continue to work, but new clients will be able to exploit its upgraded functionality. - -For scenarios that can’t be solved using stable variables alone, Motoko provides user-definable upgrade hooks that run immediately before and after upgrade, and allow you to migrate arbitrary state to stable variables. +Once upgraded, the new interface is compatible with the previous one, meaning existing clients referencing the canister will continue to work, but new clients will be able to exploit its upgraded functionality. For more complex changes of any kind, Motoko provides a safe way of user-defined [explicit migration](../canister-maintenance/compatibility.md#explicit-migration). ## Getting started diff --git a/doc/md/reference/compiler-ref.md b/doc/md/reference/compiler-ref.md index b43f9d3f139..f98aad255d0 100644 --- a/doc/md/reference/compiler-ref.md +++ b/doc/md/reference/compiler-ref.md @@ -32,14 +32,19 @@ You can use the following options with the `moc` command. | `--args0 ` | Read additional `NUL` separated command line arguments from ``. 
| | `-c` | Compile to WebAssembly. | | `--check` | Performs type checking only. | +| `--compacting-gc` | Use compacting GC (not supported with enhanced orthogonal persistence). | +| `--copying-gc` | Use copying GC (default with classical persistence, not supported with enhanced orthogonal persistence). | | `--debug` | Respects debug expressions in the source (the default). | +| `--enhanced-orthogonal-persistence` | Use enhanced orthogonal persistence (experimental): Scalable and fast upgrades using a persistent 64-bit main memory. | | `--error-detail ` | Set level of error message detail for syntax errors, n in \[0..3\] (default 2). | | `--experimental-stable-memory ` | Select support for the deprecated `ExperimentalStableMemory.mo` library (n < 0: error, n = 0: warn, n > 0: allow) (default 0). | | `-fno-shared-code` | Do not share low-level utility code: larger code size but decreased cycle consumption (default). | +| `--generational-gc` | Use generational GC (not supported with enhanced orthogonal persistence) | | `-fshared-code` | Do share low-level utility code: smaller code size but increased cycle consumption. | | `-help`,`--help` | Displays usage information. | | `--hide-warnings` | Hides compiler warnings. | | `-Werror` | Treat warnings as errors. | +| `--incremental-gc` | Use incremental GC (default of enhanced orthogonal persistence, also available for classical persistence) | | `--idl` | Compile binary and emit Candid IDL specification to `.did` file. | | `-i` | Runs the compiler in an interactive read–eval–print loop (REPL) shell so you can evaluate program execution (implies -r). | | `--map` | Outputs a JavaScript source map. | @@ -57,6 +62,7 @@ You can use the following options with the `moc` command. | `--stable-regions` | Force eager initialization of stable regions metadata (for testing purposes); consumes between 386KiB or 8MiB of additional physical stable memory, depending on current use of ExperimentalStableMemory. 
| | `--stable-types` | Compile binary and emit signature of stable types to `.most` file. | | `--stable-compatible
 `        | Test upgrade compatibility between stable-type signatures `
` and ``.                                                                       |
+| `--rts-stack-pages `                   | Set maximum number of pages available for runtime system stack (only supported with classical persistence, default 32).                               |
 | `--trap-on-call-error`                    | Trap, don't throw an [`Error`](../base/Error.md), when an IC call fails due to destination queue full or freezing threshold is crossed.               |
 |                                           | Emulates behaviour of moc versions < 0.8.0.                                                                                                           |
 | `-t`                                      | Activates tracing in interpreter.                                                                                                                     |
diff --git a/doc/md/stable-memory/stable-regions.md b/doc/md/stable-memory/stable-regions.md
index 85850008559..9b6c8b0ddf1 100644
--- a/doc/md/stable-memory/stable-regions.md
+++ b/doc/md/stable-memory/stable-regions.md
@@ -8,19 +8,13 @@ sidebar_position: 1
 
 The `Region` library provides low-level access to ICP stable memory feature.
 
-
+Stable regions were historically introduced with [classical orthogonal persistence](../canister-maintenance/orthogonal-persistence/classical.md) to allow larger-scale data to be retained across upgrades. For this purpose, programmers could explicitly store persistent data in stable memory, with regions helping to isolate different instances using stable memory.
 
-Motoko stable variables require serialization and deserialization on [upgrade](../canister-maintenance/upgrades.md). During an upgrade, the current values of stable variables are first saved to the ICP stable memory, then restored from stable memory after the new code is installed. This mechanism, however, does not scale to canisters that maintain large amounts of data in stable variables. There may not be enough cycle budget to store then restore all stable variables within an upgrade, resulting in failed upgrades.
-
-Due to the current 32-bit address space of Motoko, stable variables cannot store more than 4GiB of data.
-
-Additionally, some stable variables use a representation that is not itself `stable`, requiring a non-trivial pre-upgrade routine to pre-process the data into a `stable` form.  These pre-upgrade steps are critical, and if they trap for any reason, the Motoko canister may be stuck in an evolutionary dead-end, unable to upgrade.
+This is superseded by [enhanced orthogonal persistence](../canister-maintenance/orthogonal-persistence/enhanced.md). Nevertheless, regions are still offered for backwards-compatibility and for specific use cases where developers prefer to manage data explicitly in a persistent linear memory.
 
 ## The `Region` library
 
-To avoid these upgrade hazards, actors can elect to use the [ExperimentalStableMemory](stablememory.md) or [`Region`](../base/Region.md) libraries in package `base`. The `Region` library allows the programmer to incrementally allocate pages of 64-bit stable memory and use those pages to incrementally read and write data in a user-defined binary format.
+The [`Region`](../base/Region.md) library in package `base` allows the programmer to incrementally allocate pages of 64-bit stable memory and use those pages to incrementally read and write data in a user-defined binary format.
 
 Several pages may be allocated at once, with each page containing 64KiB. Allocation may fail due to resource limits imposed by ICP. Pages are zero-initialized.
 
@@ -81,6 +75,16 @@ module {
 }
 ```
 
+:::danger
+A stable region exposes low-level linear memory and it is the programmer's task to properly manipulate and interpret this data.
+This can be very error-prone when managing data in a stable region.
+However, the safety of Motoko's native values and heap objects is always guaranteed, independent of the stable region content.
+::: 
+
+:::note
+The cost of accessing stable regions is significantly higher than using Motoko's native memory, i.e. regular Motoko values and objects.
+:::
+
 ## Example
 
 To demonstrate the `Region` library, the following is a simple implementation of a logging actor that records text messages in a scalable, persistent log.
diff --git a/nix/default.nix b/nix/default.nix
index 7e266fa6ea8..4823456b5d3 100644
--- a/nix/default.nix
+++ b/nix/default.nix
@@ -108,9 +108,9 @@ let
           };
         })
 
-        # Rust 1.69
+        # Rust stable
         (self: super: let
-          rust-channel = self.moz_overlay.rustChannelOf { date = "2023-04-20"; channel = "stable"; };
+          rust-channel = self.moz_overlay.rustChannelOf { version = "1.78.0"; channel = "stable"; };
         in {
           rustPlatform_moz_stable = self.makeRustPlatform {
             rustc = rust-channel.rust;
diff --git a/nix/drun.nix b/nix/drun.nix
index 267e4810cca..7c69c049063 100644
--- a/nix/drun.nix
+++ b/nix/drun.nix
@@ -1,6 +1,6 @@
 pkgs:
 { drun =
-    pkgs.rustPlatform.buildRustPackage {
+    pkgs.rustPlatform_moz_stable.buildRustPackage {
       name = "drun";
 
       src = pkgs.sources.ic;
@@ -19,12 +19,10 @@ pkgs:
         lockFile = "${pkgs.sources.ic}/Cargo.lock";
         outputHashes = {
           "build-info-0.0.27" = "sha256-SkwWwDNrTsntkNiCv6rsyTFGazhpRDnKtVzPpYLKF9U=";
-          "derive_more-0.99.8-alpha.0" = "sha256-tEsfYC9oCAsDjinCsUDgRg3q6ruvayuA1lRmsEP9cys=";
-          "ic-btc-interface-0.1.0" = "sha256-JoVg1t62C2FIe0la1oQzidybLj1CyAQy80gkRh/MTn0=";
-          "ic-btc-test-utils-0.1.0" = "sha256-VecEMFjoeiRi0VgJ9CeDoOzdyJbJNiZ5MBmiV1+b7As=";
-          "icrc1-test-env-0.1.1" = "sha256-yWJF+KM8l65Nr0pwR9QeltkqbHDzOLNPVnLhf1mRukQ=";
+          "cloudflare-0.11.0" = "sha256-bJYiypmDI4KEy/VWt/7UcOv+g2CZLb9qUA9c1xlLxhM=";
+          "ic-agent-0.36.0" = "sha256-vDONIVjz0cwVgiszVRIjTKcqRUMHdVwTURflAMqmzHM=";
+          "icrc1-test-env-0.1.1" = "sha256-2PB7e64Owin/Eji3k8UoeWs+pfDfOOTaAyXjvjOZ/4g=";
           "jsonrpc-0.12.1" = "sha256-3FtdZlt2PqVDkE5iKWYIp1eiIELsaYlUPRSP2Xp8ejM=";
-          "libssh2-sys-0.2.23" = "sha256-9Hb7CnPF+lxrVO1NAhS7EXcPVWZutJXr6UWxpptzk4U=";
           "lmdb-rkv-0.14.99" = "sha256-5WcUzapkrc/s3wCBNCuUDhtbp17n67rTbm2rx0qtITg=";
         };
       };
@@ -43,9 +41,64 @@ pkgs:
 
      for file in lib_sources {
 EOF
-
         cd -
 
+        # Disable DTS for `drun`
+        patch rs/config/src/subnet_config.rs << EOF
+@@ -290,9 +290,9 @@ impl SchedulerConfig {
+     }
+ 
+     pub fn system_subnet() -> Self {
+-        let max_instructions_per_message_without_dts = NumInstructions::from(50 * B);
++        let max_instructions_per_message_without_dts =
++            MAX_INSTRUCTIONS_PER_MESSAGE_WITHOUT_DTS * SYSTEM_SUBNET_FACTOR;
+         let max_instructions_per_install_code = NumInstructions::from(1_000 * B);
+-        let max_instructions_per_slice = NumInstructions::from(10 * B);
+         Self {
+             scheduler_cores: NUMBER_OF_EXECUTION_THREADS,
+             max_paused_executions: MAX_PAUSED_EXECUTIONS,
+@@ -300,20 +300,19 @@ impl SchedulerConfig {
+             // TODO(RUN-993): Enable heap delta rate limiting for system subnets.
+             // Setting initial reserve to capacity effectively disables the rate limiting.
+             heap_delta_initial_reserve: SUBNET_HEAP_DELTA_CAPACITY,
+-            // Round limit is set to allow on average 2B instructions.
+-            // See also comment about \`MAX_INSTRUCTIONS_PER_ROUND\`.
+-            max_instructions_per_round: max_instructions_per_message_without_dts
+-                .max(max_instructions_per_slice)
+-                + NumInstructions::from(2 * B),
++            max_instructions_per_round: MAX_INSTRUCTIONS_PER_ROUND * SYSTEM_SUBNET_FACTOR,
++            // Effectively disable DTS on system subnets.
+             max_instructions_per_message: max_instructions_per_message_without_dts,
+             max_instructions_per_message_without_dts,
+-            max_instructions_per_slice,
++            // Effectively disable DTS on system subnets.
++            max_instructions_per_slice: max_instructions_per_message_without_dts,
+             instruction_overhead_per_execution: INSTRUCTION_OVERHEAD_PER_EXECUTION,
+             instruction_overhead_per_canister: INSTRUCTION_OVERHEAD_PER_CANISTER,
+             instruction_overhead_per_canister_for_finalization:
+                 INSTRUCTION_OVERHEAD_PER_CANISTER_FOR_FINALIZATION,
+             max_instructions_per_install_code,
+-            max_instructions_per_install_code_slice: max_instructions_per_slice,
++            // Effectively disable DTS on system subnets.
++            max_instructions_per_install_code_slice: max_instructions_per_install_code,
+             max_heap_delta_per_iteration: MAX_HEAP_DELTA_PER_ITERATION * SYSTEM_SUBNET_FACTOR,
+             max_message_duration_before_warn_in_seconds:
+                 MAX_MESSAGE_DURATION_BEFORE_WARN_IN_SECONDS,
+EOF
+
+        # static linking of libunwind fails under nix Linux
+        patch rs/monitoring/backtrace/build.rs << EOF
+@@ -1,8 +1,2 @@
+ fn main() {
+-    if std::env::var("TARGET").unwrap() == "x86_64-unknown-linux-gnu" {
+-        println!("cargo:rustc-link-lib=static=unwind");
+-        println!("cargo:rustc-link-lib=static=unwind-ptrace");
+-        println!("cargo:rustc-link-lib=static=unwind-x86_64");
+-        println!("cargo:rustc-link-lib=dylib=lzma");
+-    }
+ }
+EOF
+
         mkdir -p .cargo
         cat > .cargo/config.toml << EOF
 [target.x86_64-apple-darwin]
diff --git a/nix/sources.json b/nix/sources.json
index 5668002540a..074cba24db2 100644
--- a/nix/sources.json
+++ b/nix/sources.json
@@ -21,15 +21,15 @@
         "version": "3.2.25"
     },
     "ic": {
-        "branch": "pull/143/head",
+        "branch": "luc/latest-ic-wasm64-test",
         "description": "Internet Computer blockchain source: the client/replica software run by nodes",
         "homepage": "",
-        "owner": "dfinity",
+        "owner": "luc-blaeser",
         "repo": "ic",
-        "rev": "31f02c1b08aef1c6ee4c1b6bb9d25c21c5a4ff41",
-        "sha256": "04k278900kv082axrgv6vga0n8sw6c3lcc9298ih9gjyx8hanfb1",
+        "rev": "7921f5f3dc0d9fb774e3222f8ff6b1c00a086f1a",
+        "sha256": "1ykawbpaqnf1y508vh81m30p813ykmnbffxc3p0hw0p0k1ynq6zz",
         "type": "tarball",
-        "url": "https://github.com/dfinity/ic/archive/31f02c1b08aef1c6ee4c1b6bb9d25c21c5a4ff41.tar.gz",
+        "url": "https://github.com/luc-blaeser/ic/archive/7921f5f3dc0d9fb774e3222f8ff6b1c00a086f1a.tar.gz",
         "url_template": "https://github.com///archive/.tar.gz"
     },
     "ic-hs": {
@@ -72,13 +72,13 @@
     "motoko-base": {
         "branch": "next-moc",
         "description": "The Motoko base library",
-        "homepage": null,
+        "homepage": "",
         "owner": "dfinity",
         "repo": "motoko-base",
-        "rev": "4abc98e92909d7b178629912057473302cf0d8be",
-        "sha256": "09yhfk17rb9xk2cvi6353hfb43df05pg0x1gw5qrf8n0g0dvd3jx",
+        "rev": "dfabfc3201950263a0057f5d1a78d2ede5c87478",
+        "sha256": "1wd9rcmhg82h08l3gz3gmn2xpgalvf679w7yz2g35yzbg2b5g2fd",
         "type": "tarball",
-        "url": "https://github.com/dfinity/motoko-base/archive/4abc98e92909d7b178629912057473302cf0d8be.tar.gz",
+        "url": "https://github.com/dfinity/motoko-base/archive/dfabfc3201950263a0057f5d1a78d2ede5c87478.tar.gz",
         "url_template": "https://github.com///archive/.tar.gz"
     },
     "motoko-matchers": {
@@ -133,6 +133,18 @@
         "url": "https://github.com/mozilla/nixpkgs-mozilla/archive/1ca9ee7192f973fd67b0988bdd77b8c11ae245a6.tar.gz",
         "url_template": "https://github.com///archive/.tar.gz"
     },
+    "nixpkgs-unstable": {
+        "branch": "master",
+        "description": "Nix Packages collection & NixOS",
+        "homepage": "",
+        "owner": "NixOS",
+        "repo": "nixpkgs",
+        "rev": "9d2864289e65d0dc841123ae86bc7b3f77b6d12d",
+        "sha256": "0f3bcphzrjm1hs81mhpfx1hqxfnbvhhv5hv82wi1h3f735i279ja",
+        "type": "tarball",
+        "url": "https://github.com/NixOS/nixpkgs/archive/9d2864289e65d0dc841123ae86bc7b3f77b6d12d.tar.gz",
+        "url_template": "https://github.com///archive/.tar.gz"
+    },
     "ocaml-vlq": {
         "branch": "v0.2.0",
         "builtin": false,
diff --git a/perf-delta.nix b/perf-delta.nix
index 3a6ebed3693..d4ede1f6236 100644
--- a/perf-delta.nix
+++ b/perf-delta.nix
@@ -37,7 +37,7 @@ let
         do
           # ignore all errors
           echo -n $file
-          if timeout 10s moc $file --omit-metadata motoko:compiler --force-gc --compacting-gc -no-check-ir -ref-system-api -o $file.wasm 2>/dev/null
+          if timeout 10s moc $file --omit-metadata motoko:compiler -no-check-ir -ref-system-api -o $file.wasm 2>/dev/null
           then echo " ok"
           else echo " failed (ignored)"
           fi
diff --git a/rts/Makefile b/rts/Makefile
index 052952ee45d..7f55ab3eff1 100644
--- a/rts/Makefile
+++ b/rts/Makefile
@@ -21,7 +21,7 @@ TOMMATHFILES = \
    s_mp_mul_digs_fast s_mp_mul_digs mp_init_multi mp_clear_multi mp_mul_2 mp_div_2 mp_div_3 mp_lshd mp_incr \
    mp_decr mp_add_d mp_sub_d
 
-MUSLFILES = \
+MUSLFILES_32 = \
   pow pow_data sin cos tan asin acos atan atan2 exp exp_data log log_data fmod \
   floor scalbn frexp strlen strnlen memcpy memset memchr memcmp snprintf vsnprintf vfprintf \
   __math_oflow __math_uflow __math_xflow __math_divzero __math_invalid \
@@ -37,8 +37,7 @@ MUSL_WASI_SYSROOT ?= $(MUSLSRC)/../../sysroot
 # manager
 #
 
-TOMMATH_FLAGS = \
-  -DMP_32BIT \
+TOMMATH_FLAGS_COMMON = \
   -DMP_MALLOC=mp_malloc \
   -DMP_REALLOC=mp_realloc \
   -DMP_CALLOC=mp_calloc \
@@ -47,7 +46,13 @@ TOMMATH_FLAGS = \
   -DMP_FIXED_CUTOFFS \
   -DMP_PREC=4 \
   -DMP_NO_FILE \
-  -D__STDC_IEC_559__ \
+  -D__STDC_IEC_559__
+
+TOMMATH_FLAGS_32 = $(TOMMATH_FLAGS_COMMON) \
+  -DMP_32BIT
+
+TOMMATH_FLAGS_64 = $(TOMMATH_FLAGS_COMMON) \
+  -DMP_64BIT
 
 # Note: the above __STDC_IEC_559__ define is somewhat of a misnomer
 #       as only IEEE 754 features are used.
@@ -63,11 +68,11 @@ TOMMATH_FLAGS = \
 #       and will need tweaks/additions below.
 #       Similarly we define include guards (to suppress certain headers), but those should be
 #       pretty stable.
-#       TODO: run `wasm2wat mo-rts.wasm | grep -F '(import' | grep __fwritex_` expecting empty.
+#       TODO: run `wasm2wat --enable-memory64 mo-rts-eop.wasm | grep -F '(import' | grep __fwritex_` expecting empty.
 #
 #       See also https://stackoverflow.com/questions/1597007/creating-c-macro-with-and-line-token-concatenation-with-positioning-macr
 
-MUSL_FLAGS = \
+MUSL_FLAGS_32 = \
   -isystem $(MUSLSRC)/arch/wasm32 \
   -isystem $(MUSLSRC)/src/include \
   -isystem $(MUSLSRC)/src/internal \
@@ -91,66 +96,77 @@ MUSL_FLAGS = \
   -D__NEED_locale_t \
   -Dsqrt=__builtin_sqrt \
   -Dfabs=__builtin_fabs
-
 #
 # clang flags
 #
 
-CLANG_FLAGS = \
-   --compile \
+CLANG_FLAGS_COMMON = \
+	--compile \
    -fpic \
    -fvisibility=hidden \
    --std=c11 \
-   --target=wasm32-emscripten \
    -fno-builtin -ffreestanding \
    --optimize=s \
    -resource-dir=$(wildcard $(WASM_CLANG_LIB)/lib/clang/*)
 
+CLANG_FLAGS_32 = $(CLANG_FLAGS_COMMON) \
+   --target=wasm32-emscripten
+
+CLANG_FLAGS_64 = $(CLANG_FLAGS_COMMON) \
+   --target=wasm64
+
 #
 # Build targets
 #
 
 .PHONY: all
 
-all: mo-rts.wasm mo-rts-debug.wasm
+all: mo-rts-non-incremental.wasm mo-rts-non-incremental-debug.wasm mo-rts-incremental.wasm mo-rts-incremental-debug.wasm mo-rts-eop.wasm mo-rts-eop-debug.wasm
 
 _build:
 	mkdir -p $@
 
-_build/wasm:
+_build/wasm64:
 	mkdir -p $@
 
-_build/i686:
+_build/wasm32:
 	mkdir -p $@
 
+# 
+# Common configuration
+# 
+
+RTS_EMSCRIPTEN_64=emcc -c --target=wasm64-unknown-emscripten -fpic
+RTS_EMSCRIPTEN_64_DEBUG=$(RTS_EMSCRIPTEN_64)
+RTS_EMSCRIPTEN_64_RELEASE=$(RTS_EMSCRIPTEN_64) -O2
+
 #
-# Let make automatically search these directorys (tommath and musl) for .c files
+# Let make automatically search these directories (tommath and musl) for .c files
 #
 
 vpath %.c $(MUSLSRC)/src/math $(MUSLSRC)/src/stdio $(MUSLSRC)/src/string $(MUSLSRC)/src/ctype $(TOMMATHSRC)
 
-
 #
 # Building the libtommath files
 #
 
-TOMMATH_WASM_O=$(TOMMATHFILES:%=_build/wasm/tommath_%.o)
-TOMMATH_WASM_A=_build/libtommath.a
+TOMMATH_WASM_32_O=$(TOMMATHFILES:%=_build/wasm32/tommath_%.o)
+TOMMATH_WASM_32_A=_build/libtommath_wasm32.a
+
+TOMMATH_WASM_64_O=$(TOMMATHFILES:%=_build/wasm64/tommath_%.o)
+TOMMATH_WASM_64_A=_build/libtommath_wasm64.a
 
-TOMMATH_i686_O=$(TOMMATHFILES:%=_build/i686/tommath_%.o)
-TOMMATH_i686_A=_build/libtommath_i686.a
+_build/wasm32/tommath_%.o: bn_%.c | _build/wasm32
+	$(WASM_CLANG) $(CLANG_FLAGS_32) $(TOMMATH_FLAGS_32) $< --output $@
 
-_build/wasm/tommath_%.o: bn_%.c | _build/wasm
-	$(WASM_CLANG) $(CLANG_FLAGS) $(TOMMATH_FLAGS) $< --output $@
+_build/wasm64/tommath_%.o: bn_%.c | _build/wasm64
+	$(WASM_CLANG) $(CLANG_FLAGS_64) $(TOMMATH_FLAGS_64) $< --output $@
 
-$(TOMMATH_WASM_A): $(TOMMATH_WASM_O)
+$(TOMMATH_WASM_32_A): $(TOMMATH_WASM_32_O)
 	llvm-ar rcs $@ $^
 	llvm-ranlib $@
 
-_build/i686/tommath_%.o: bn_%.c | _build/i686
-	$(WASM_CLANG) $(CLANG_FLAGS) $(TOMMATH_FLAGS) --target=i686-unknown-linux $< --output $@
-
-$(TOMMATH_i686_A): $(TOMMATH_i686_O)
+$(TOMMATH_WASM_64_A): $(TOMMATH_WASM_64_O)
 	llvm-ar rcs $@ $^
 	llvm-ranlib $@
 
@@ -158,14 +174,13 @@ $(TOMMATH_i686_A): $(TOMMATH_i686_O)
 # Building the musl files
 #
 
-MUSL_WASM_O=$(MUSLFILES:%=_build/wasm/musl_%.o)
-MUSL_WASM_A=_build/libmusl.a
-
-_build/wasm/musl_%.o: %.c | _build/wasm
-	$(WASM_CLANG) $(CLANG_FLAGS) $(MUSL_FLAGS) $< --output $@
+MUSL_WASM_32_O=$(MUSLFILES_32:%=_build/wasm32/musl_%.o)
+MUSL_WASM_32_A=_build/libmusl_wasm32.a
 
+_build/wasm32/musl_%.o: %.c | _build/wasm32
+	$(WASM_CLANG) $(CLANG_FLAGS_32) $(MUSL_FLAGS_32) $< --output $@
 
-$(MUSL_WASM_A): $(MUSL_WASM_O)
+$(MUSL_WASM_32_A): $(MUSL_WASM_32_O)
 	llvm-ar rcs $@ $^
 	llvm-ranlib $@
 
@@ -177,12 +192,12 @@ $(MUSL_WASM_A): $(MUSL_WASM_O)
 RTS_RUST_FILES=$(shell ls **/*.rs)
 RTS_CARGO_FILES=$(shell ls **/Cargo.toml)
 
-TOMMATH_BINDINGS_RS=_build/tommath_bindings.rs
+TOMMATH_BINDINGS_RS_32=_build/wasm32/tommath_bindings.rs
+TOMMATH_BINDINGS_RS_64=_build/wasm64/tommath_bindings.rs
 
-$(TOMMATH_BINDINGS_RS): | _build
-	bindgen $(TOMMATHSRC)/tommath.h \
-	    -o $@ \
-	    --use-core --ctypes-prefix=libc --no-layout-tests \
+TOMMATH_BIND_OPTIONS = \
+		--use-core \
+		--no-layout-tests \
 	    --allowlist-function mp_init \
 	    --allowlist-function mp_init_copy \
 	    --allowlist-function mp_set_u32 \
@@ -210,7 +225,21 @@ $(TOMMATH_BINDINGS_RS): | _build
 	    --blocklist-type __int64_t \
 	    --blocklist-type __uint32_t \
 	    --blocklist-type __uint64_t \
-            -- $(TOMMATH_FLAGS)
+
+
+$(TOMMATH_BINDINGS_RS_32): | _build/wasm32
+	bindgen $(TOMMATHSRC)/tommath.h \
+	    -o $@ \
+	    --ctypes-prefix=libc \
+		$(TOMMATH_BIND_OPTIONS) \
+		-- $(TOMMATH_FLAGS_32)
+
+$(TOMMATH_BINDINGS_RS_64): | _build/wasm64
+	bindgen $(TOMMATHSRC)/tommath.h \
+	    -o $@ \
+	    --ctypes-prefix="crate::libc_declarations" \
+		$(TOMMATH_BIND_OPTIONS) \
+		-- $(TOMMATH_FLAGS_64)
 
 	# Whitelist parameters used as libtommath.h has lots of definitions that we don't
 	# need. Blacklist parameters are used because bindgen still generates unused type
@@ -219,43 +248,160 @@ $(TOMMATH_BINDINGS_RS): | _build
 	# Note that bindgen can't generate Rust macros or functions for CPP macros, so
 	# macros like `mp_get_u32` and `mp_isneg` need to be manually implemented.
 
-RTS_DEPENDENCIES=$(TOMMATH_BINDINGS_RS) $(RTS_RUST_FILES) $(RTS_CARGO_FILES) | _build/wasm
-RTS_BUILD=cd motoko-rts && cargo build --target=wasm32-unknown-emscripten -Zbuild-std=core,alloc
-RTS_DEBUG_BUILD=$(RTS_BUILD)
-RTS_RELEASE_BUILD=$(RTS_BUILD) --release
-RTS_DEBUG_TARGET=motoko-rts/target/wasm32-unknown-emscripten/debug/libmotoko_rts.a
-RTS_RELEASE_TARGET=motoko-rts/target/wasm32-unknown-emscripten/release/libmotoko_rts.a
+# 32-bit Wasm builds
+
+RTS_DEPENDENCIES_32=$(TOMMATH_BINDINGS_RS_32) $(RTS_RUST_FILES) $(RTS_CARGO_FILES) | _build/wasm32
+RTS_BUILD_32=cd motoko-rts && cargo build --target=wasm32-unknown-emscripten -Zbuild-std=core,alloc
+RTS_DEBUG_BUILD_32=$(RTS_BUILD_32)
+RTS_RELEASE_BUILD_32=$(RTS_BUILD_32) --release
+RTS_DEBUG_TARGET_32=motoko-rts/target/wasm32-unknown-emscripten/debug/libmotoko_rts.a
+RTS_RELEASE_TARGET_32=motoko-rts/target/wasm32-unknown-emscripten/release/libmotoko_rts.a
+
+RTS_RUST_NON_INCREMENTAL_WASM_32_A=_build/wasm32/libmotoko_rts.a
+RTS_RUST_NON_INCREMENTAL_DEBUG_WASM_32_A=_build/wasm32/libmotoko_rts_debug.a
+
+RTS_RUST_INCREMENTAL_WASM_32_A=_build/wasm32/libmotoko_rts_incremental.a
+RTS_RUST_INCREMENTAL_DEBUG_WASM_32_A=_build/wasm32/libmotoko_rts_incremental_debug.a
+
+$(RTS_RUST_NON_INCREMENTAL_WASM_32_A): $(RTS_DEPENDENCIES_32)
+	$(RTS_RELEASE_BUILD_32) --features classical_persistence
+	cp $(RTS_RELEASE_TARGET_32) $@
+
+$(RTS_RUST_NON_INCREMENTAL_DEBUG_WASM_32_A): $(RTS_DEPENDENCIES_32)
+	$(RTS_DEBUG_BUILD_32) --features classical_persistence
+	cp $(RTS_DEBUG_TARGET_32) $@
+
+$(RTS_RUST_INCREMENTAL_WASM_32_A): $(RTS_DEPENDENCIES_32)
+	$(RTS_RELEASE_BUILD_32) --features classical_persistence,incremental_gc
+	cp $(RTS_RELEASE_TARGET_32) $@
+
+$(RTS_RUST_INCREMENTAL_DEBUG_WASM_32_A): $(RTS_DEPENDENCIES_32)
+	$(RTS_DEBUG_BUILD_32) --features classical_persistence,incremental_gc
+	cp $(RTS_DEBUG_TARGET_32) $@
+
+# 64-bit Wasm builds
+
+RTS_DEPENDENCIES_64=$(TOMMATH_BINDINGS_RS_64) $(RTS_RUST_FILES) $(RTS_CARGO_FILES) | _build/wasm64
+COMPILER_FLAGS_64=--emit=llvm-ir
+RTS_BUILD_64=cd motoko-rts && RUSTFLAGS="${COMPILER_FLAGS_64}" cargo build --target=wasm64-unknown-unknown -Zbuild-std=core,alloc --features enhanced_orthogonal_persistence
+RTS_DEBUG_BUILD_64=$(RTS_BUILD_64)
+RTS_RELEASE_BUILD_64=$(RTS_BUILD_64) --release
+
+RTS_DEBUG_TARGET_64_FOLDER=motoko-rts/target/wasm64-unknown-unknown/debug
+RTS_RELEASE_TARGET_64_FOLDER=motoko-rts/target/wasm64-unknown-unknown/release
+
+RTS_DEBUG_TARGET_64=$(RTS_DEBUG_TARGET_64_FOLDER)/deps/motoko_rts-*.ll
+RTS_RELEASE_TARGET_64=$(RTS_RELEASE_TARGET_64_FOLDER)/deps/motoko_rts-*.ll
+
+CORE_DEBUG_TARGET_64=$(RTS_DEBUG_TARGET_64_FOLDER)/deps/core-*.ll
+CORE_RELEASE_TARGET_64=$(RTS_RELEASE_TARGET_64_FOLDER)/deps/core-*.ll
+
+ALLOC_DEBUG_TARGET_64=$(RTS_DEBUG_TARGET_64_FOLDER)/deps/alloc-*.ll
+ALLOC_RELEASE_TARGET_64=$(RTS_RELEASE_TARGET_64_FOLDER)/deps/alloc-*.ll
+
+COMPILER_BUILTINS_DEBUG_TARGET_64=$(RTS_DEBUG_TARGET_64_FOLDER)/deps/compiler_builtins-*.ll
+COMPILER_BUILTINS_RELEASE_TARGET_64=$(RTS_RELEASE_TARGET_64_FOLDER)/deps/compiler_builtins-*.ll
+
+BUILD_64_FOLDER=_build/wasm64
+
+RTS_RUST_LLVM_IR_64=$(BUILD_64_FOLDER)/libmotoko_rts.ll
+RTS_RUST_DEBUG_LLVM_IR_64=$(BUILD_64_FOLDER)/libmotoko_rts_debug.ll
 
-RTS_RUST_WASM_A=_build/wasm/libmotoko_rts.a
-RTS_RUST_DEBUG_WASM_A=_build/wasm/libmotoko_rts_debug.a
+CORE_DEBUG_LLVM_IR_64=$(BUILD_64_FOLDER)/core_debug.ll
+CORE_RELEASE_LLVM_IR_64=$(BUILD_64_FOLDER)/core.ll
 
-$(RTS_RUST_WASM_A): $(RTS_DEPENDENCIES)
-	$(RTS_RELEASE_BUILD)
-	cp $(RTS_RELEASE_TARGET) $@
+ALLOC_DEBUG_LLVM_IR_64=$(BUILD_64_FOLDER)/alloc_debug.ll
+ALLOC_RELEASE_LLVM_IR_64=$(BUILD_64_FOLDER)/alloc.ll
 
-$(RTS_RUST_DEBUG_WASM_A): $(RTS_DEPENDENCIES)
-	$(RTS_DEBUG_BUILD)
-	cp $(RTS_DEBUG_TARGET) $@
+COMPILER_BUILTINS_DEBUG_LLVM_IR_64=$(BUILD_64_FOLDER)/compiler_builtins_debug.ll
+COMPILER_BUILTINS_RELEASE_LLVM_IR_64=$(BUILD_64_FOLDER)/compiler_builtins.ll
+
+$(RTS_RUST_LLVM_IR_64): $(RTS_DEPENDENCIES_64)
+	rm -rf $(RTS_RELEASE_TARGET_64_FOLDER)
+	$(RTS_RELEASE_BUILD_64)
+	cp $(CORE_RELEASE_TARGET_64) -T $(CORE_RELEASE_LLVM_IR_64)
+	cp $(ALLOC_RELEASE_TARGET_64) -T $(ALLOC_RELEASE_LLVM_IR_64)
+	cp $(COMPILER_BUILTINS_RELEASE_TARGET_64) -T $(COMPILER_BUILTINS_RELEASE_LLVM_IR_64)
+	cp $(RTS_RELEASE_TARGET_64) -T $@
+
+$(RTS_RUST_DEBUG_LLVM_IR_64): $(RTS_DEPENDENCIES_64)
+	rm -rf $(RTS_DEBUG_TARGET_64_FOLDER) 
+	$(RTS_DEBUG_BUILD_64)
+	cp $(CORE_DEBUG_TARGET_64) -T $(CORE_DEBUG_LLVM_IR_64)
+	cp $(ALLOC_DEBUG_TARGET_64) -T $(ALLOC_DEBUG_LLVM_IR_64)
+	cp $(COMPILER_BUILTINS_DEBUG_TARGET_64) -T $(COMPILER_BUILTINS_DEBUG_LLVM_IR_64)
+	cp $(RTS_DEBUG_TARGET_64) -T $@
+
+RTS_RUST_WASM_64_A=$(BUILD_64_FOLDER)/libmotoko_rts.o
+RTS_RUST_DEBUG_WASM_64_A=$(BUILD_64_FOLDER)/libmotoko_rts_debug.o
+
+CORE_WASM_64_A=$(BUILD_64_FOLDER)/core.o
+CORE_DEBUG_WASM_64_A=$(BUILD_64_FOLDER)/core_debug.o
+
+ALLOC_WASM_64_A=$(BUILD_64_FOLDER)/alloc.o
+ALLOC_DEBUG_WASM_64_A=$(BUILD_64_FOLDER)/alloc_debug.o
+
+COMPILER_BUILTINS_WASM_64_A=$(BUILD_64_FOLDER)/compiler_builtins.o
+COMPILER_BUILTINS_DEBUG_WASM_64_A=$(BUILD_64_FOLDER)/compiler_builtins_debug.o
+
+$(RTS_RUST_WASM_64_A): $(RTS_RUST_LLVM_IR_64)
+	$(RTS_EMSCRIPTEN_64_RELEASE) -o $@ $+
+
+$(RTS_RUST_DEBUG_WASM_64_A): $(RTS_RUST_DEBUG_LLVM_IR_64)
+	$(RTS_EMSCRIPTEN_64_DEBUG) -o $@ $+
+
+$(CORE_WASM_64_A): $(CORE_RELEASE_LLVM_IR_64)
+	$(RTS_EMSCRIPTEN_64_RELEASE) -o $@ $+
+
+$(CORE_DEBUG_WASM_64_A): $(CORE_DEBUG_LLVM_IR_64)
+	$(RTS_EMSCRIPTEN_64_DEBUG) -o $@ $+
+
+$(ALLOC_WASM_64_A): $(ALLOC_RELEASE_LLVM_IR_64)
+	$(RTS_EMSCRIPTEN_64_RELEASE) -o $@ $+
+
+$(ALLOC_DEBUG_WASM_64_A): $(ALLOC_DEBUG_LLVM_IR_64)
+	$(RTS_EMSCRIPTEN_64_DEBUG) -o $@ $+
+
+$(COMPILER_BUILTINS_WASM_64_A): $(COMPILER_BUILTINS_RELEASE_LLVM_IR_64)
+	$(RTS_EMSCRIPTEN_64_RELEASE) -o $@ $+
+
+$(COMPILER_BUILTINS_DEBUG_WASM_64_A): $(COMPILER_BUILTINS_DEBUG_LLVM_IR_64)
+	$(RTS_EMSCRIPTEN_64_DEBUG) -o $@ $+
 
 #
 # The test suite
 #
 
-TEST_DEPENDENCIES=$(TOMMATH_WASM_A) $(TOMMATH_BINDINGS_RS)
-TEST_BUILD=cd motoko-rts-tests && cargo build --target=wasm32-wasi
-TEST_RUN=wasmtime -C cache=n -W nan-canonicalization=y motoko-rts-tests/target/wasm32-wasi/debug/motoko-rts-tests.wasm
+TEST_DEPENDENCIES_32=$(TOMMATH_WASM_32_A) $(TOMMATH_BINDINGS_RS_32)
+TEST_BUILD_32=cd motoko-rts-tests && cargo build --target=wasm32-wasi
+TEST_RUN_32=wasmtime -C cache=n -W nan-canonicalization=y motoko-rts-tests/target/wasm32-wasi/debug/motoko-rts-tests.wasm
+
+TEST_DEPENDENCIES_64=$(TOMMATH_WASM_64_A) $(TOMMATH_BINDINGS_RS_64)
+TEST_BUILD_64=cd motoko-rts-tests && cargo build --target=wasm64-unknown-unknown -Zbuild-std=core,alloc,std,panic_abort --features enhanced_orthogonal_persistence
+TEST_RUN_64=wasmtime -C cache=n -W nan-canonicalization=y -W memory64 motoko-rts-tests/target/wasm64-unknown-unknown/debug/motoko-rts-tests.wasm
 
 .PHONY: test
 
-test: $(TEST_DEPENDENCIES)
-	$(TEST_BUILD)
-	$(TEST_RUN)
+test: test32-non-incremental test32-incremental test64
+
+test32-non-incremental: $(TEST_DEPENDENCIES_32)
+	$(TEST_BUILD_32) --features classical_persistence
+	$(TEST_RUN_32)
+
+test32-incremental: $(TEST_DEPENDENCIES_32)
+	$(TEST_BUILD_32) --features classical_persistence,incremental_gc
+	$(TEST_RUN_32)
+
+test64: $(TEST_DEPENDENCIES_64)
+	$(TEST_BUILD_64)
+	$(TEST_RUN_64)
 
 #
 # Putting it all together
 #
 
 # These symbols from musl are used by the code generator directly
+
 EXPORTED_SYMBOLS=\
   __wasm_call_ctors \
   memcpy \
@@ -270,22 +416,49 @@ EXPORTED_SYMBOLS=\
   cos \
   exp \
   fmod \
-  log \
+  log
 
-WASM_A_DEPENDENCIES=$(TOMMATH_WASM_A) $(MUSL_WASM_A)
-LINKER_OPTIONS=\
+WASM_A_DEPENDENCIES_32=$(TOMMATH_WASM_32_A) $(MUSL_WASM_32_A)
+LINKER_OPTIONS_32=\
   --import-memory --shared --no-entry --gc-sections \
   $(EXPORTED_SYMBOLS:%=--export=%) \
   --whole-archive
 
-mo-rts.wasm: $(RTS_RUST_WASM_A) $(WASM_A_DEPENDENCIES)
+WASM_A_DEPENDENCIES_64=$(TOMMATH_WASM_64_A)
+LINKER_OPTIONS_64=\
+  -mwasm64 --import-memory --shared --no-entry --gc-sections \
+  $(EXPORTED_SYMBOLS:%=--export=%) \
+  --whole-archive
+
+
+mo-rts-non-incremental.wasm: $(RTS_RUST_NON_INCREMENTAL_WASM_32_A) $(WASM_A_DEPENDENCIES_32)
+	$(WASM_LD) -o $@ \
+		$(LINKER_OPTIONS_32) \
+		$+
+
+mo-rts-non-incremental-debug.wasm: $(RTS_RUST_NON_INCREMENTAL_DEBUG_WASM_32_A) $(WASM_A_DEPENDENCIES_32)
+	$(WASM_LD) -o $@ \
+		$(LINKER_OPTIONS_32) \
+		$+
+
+mo-rts-incremental.wasm: $(RTS_RUST_INCREMENTAL_WASM_32_A) $(WASM_A_DEPENDENCIES_32)
+	$(WASM_LD) -o $@ \
+		$(LINKER_OPTIONS_32) \
+		$+
+
+mo-rts-incremental-debug.wasm: $(RTS_RUST_INCREMENTAL_DEBUG_WASM_32_A) $(WASM_A_DEPENDENCIES_32)
+	$(WASM_LD) -o $@ \
+		$(LINKER_OPTIONS_32) \
+		$+
+
+mo-rts-eop.wasm: $(RTS_RUST_WASM_64_A) $(CORE_WASM_64_A) $(ALLOC_WASM_64_A) $(COMPILER_BUILTINS_WASM_64_A) $(WASM_A_DEPENDENCIES_64)
 	$(WASM_LD) -o $@ \
-		$(LINKER_OPTIONS) \
+		$(LINKER_OPTIONS_64) \
 		$+
 
-mo-rts-debug.wasm: $(RTS_RUST_DEBUG_WASM_A) $(WASM_A_DEPENDENCIES)
+mo-rts-eop-debug.wasm: $(RTS_RUST_DEBUG_WASM_64_A) $(CORE_DEBUG_WASM_64_A) $(ALLOC_DEBUG_WASM_64_A) $(COMPILER_BUILTINS_DEBUG_WASM_64_A) $(WASM_A_DEPENDENCIES_64)
 	$(WASM_LD) -o $@ \
-		$(LINKER_OPTIONS) \
+		$(LINKER_OPTIONS_64) \
 		$+
 
 format:
@@ -300,4 +473,4 @@ clean:
 	  motoko-rts/target \
 	  motoko-rts-tests/target \
 	  motoko-rts-macros/target \
-	  motoko-rts/cargo-home
+	  motoko-rts/cargo-home \
diff --git a/rts/README.md b/rts/README.md
index 0afe4cd7d66..b50cef615ea 100644
--- a/rts/README.md
+++ b/rts/README.md
@@ -6,7 +6,7 @@ This directory contains the parts of the Motoko runtime implemented in Rust.
 tl;dr
 -----
 
-If you just want to get `mo-rts.wasm` in this directory, run
+If you just want to get RTS wasm files in this directory, run
 
     nix-shell --run 'make -C rts'
 
@@ -15,7 +15,7 @@ from the top-level directory of the Motoko repository.
 Compilation
 -----------
 
-Running `make` should produce `mo-rts.wasm`.
+Running `make` should produce RTS Wasm files (different versions).
 
 If run within `nix-shell`, the environment variables `WASM_CLANG` and `WASM_LD`
 should point to suitable binaries (we track a specific unreleased version of
diff --git a/rts/motoko-rts-macros/Cargo.lock b/rts/motoko-rts-macros/Cargo.lock
index 229431b180c..8e125546d33 100644
--- a/rts/motoko-rts-macros/Cargo.lock
+++ b/rts/motoko-rts-macros/Cargo.lock
@@ -13,27 +13,27 @@ dependencies = [
 
 [[package]]
 name = "proc-macro2"
-version = "1.0.50"
+version = "1.0.67"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6ef7d57beacfaf2d8aee5937dab7b7f28de3cb8b1828479bb5de2a7106f2bae2"
+checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328"
 dependencies = [
  "unicode-ident",
 ]
 
 [[package]]
 name = "quote"
-version = "1.0.23"
+version = "1.0.33"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b"
+checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae"
 dependencies = [
  "proc-macro2",
 ]
 
 [[package]]
 name = "syn"
-version = "1.0.107"
+version = "1.0.109"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5"
+checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -42,6 +42,6 @@ dependencies = [
 
 [[package]]
 name = "unicode-ident"
-version = "1.0.6"
+version = "1.0.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc"
+checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
diff --git a/rts/motoko-rts-macros/Cargo.toml b/rts/motoko-rts-macros/Cargo.toml
index 08e37be9cf5..5af623a0ac7 100644
--- a/rts/motoko-rts-macros/Cargo.toml
+++ b/rts/motoko-rts-macros/Cargo.toml
@@ -8,6 +8,6 @@ edition = "2018"
 proc_macro = true
 
 [dependencies]
-proc-macro2 = "1.0.50"
-syn = { version = "1.0.107", features = ["full"] }
-quote = "1.0.23"
+proc-macro2 = "1.0.66"
+syn = { version = "1.0.109", features = ["full"] }
+quote = "1.0.26"
diff --git a/rts/motoko-rts-macros/src/lib.rs b/rts/motoko-rts-macros/src/lib.rs
index 49ac0e18d3e..10c47079284 100644
--- a/rts/motoko-rts-macros/src/lib.rs
+++ b/rts/motoko-rts-macros/src/lib.rs
@@ -136,3 +136,67 @@ pub fn ic_mem_fn(attr: TokenStream, input: TokenStream) -> TokenStream {
     )
     .into()
 }
+
+/// Feature macro for incremental GC features, in particular forwarding pointers.
+/// Equivalent to using the attribute `#[cfg(feature = "incremental_gc")]`.
+#[proc_macro_attribute]
+pub fn incremental_gc(attr: TokenStream, input: TokenStream) -> TokenStream {
+    assert!(attr.is_empty());
+    let block = syn::parse_macro_input!(input as syn::Item);
+    quote!(
+        #[cfg(feature = "incremental_gc")]
+        #block
+    )
+    .into()
+}
+
+/// Feature macro for non-incremental GC features, i.e. code compiled only when the incremental GC (with its forwarding pointers) is disabled.
+/// Equivalent to using the attribute `#[cfg(not(feature = "incremental_gc"))]`.
+#[proc_macro_attribute]
+pub fn non_incremental_gc(attr: TokenStream, input: TokenStream) -> TokenStream {
+    assert!(attr.is_empty());
+    let block = syn::parse_macro_input!(input as syn::Item);
+    quote!(
+        #[cfg(not(feature = "incremental_gc"))]
+        #block
+    )
+    .into()
+}
+
+#[proc_macro]
+pub fn is_incremental_gc(_item: TokenStream) -> TokenStream {
+    "cfg!(feature = \"incremental_gc\")".parse().unwrap()
+}
+
+/// Feature macro for enhanced orthogonal persistence.
+/// Equivalent to using the attribute `#[cfg(feature = "enhanced_orthogonal_persistence")]`.
+#[proc_macro_attribute]
+pub fn enhanced_orthogonal_persistence(attr: TokenStream, input: TokenStream) -> TokenStream {
+    assert!(attr.is_empty());
+    let block = syn::parse_macro_input!(input as syn::Item);
+    quote!(
+        #[cfg(feature = "enhanced_orthogonal_persistence")]
+        #block
+    )
+    .into()
+}
+
+/// Feature macro for classical persistence, based on Candid stabilization.
+/// Equivalent to using the attribute `#[cfg(not(feature = "enhanced_orthogonal_persistence"))]`.
+#[proc_macro_attribute]
+pub fn classical_persistence(attr: TokenStream, input: TokenStream) -> TokenStream {
+    assert!(attr.is_empty());
+    let block = syn::parse_macro_input!(input as syn::Item);
+    quote!(
+        #[cfg(not(feature = "enhanced_orthogonal_persistence"))]
+        #block
+    )
+    .into()
+}
+
+#[proc_macro]
+pub fn uses_enhanced_orthogonal_persistence(_item: TokenStream) -> TokenStream {
+    "cfg!(feature = \"enhanced_orthogonal_persistence\")"
+        .parse()
+        .unwrap()
+}
diff --git a/rts/motoko-rts-tests/Cargo.lock b/rts/motoko-rts-tests/Cargo.lock
index dea7c950617..a822b629b70 100644
--- a/rts/motoko-rts-tests/Cargo.lock
+++ b/rts/motoko-rts-tests/Cargo.lock
@@ -20,12 +20,6 @@ version = "1.4.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
 
-[[package]]
-name = "cfg-if"
-version = "1.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
-
 [[package]]
 name = "fxhash"
 version = "0.2.1"
@@ -36,27 +30,16 @@ dependencies = [
 ]
 
 [[package]]
-name = "getrandom"
-version = "0.2.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4"
-dependencies = [
- "cfg-if",
- "libc",
- "wasi",
-]
-
-[[package]]
-name = "lazy_static"
-version = "1.4.0"
+name = "libc"
+version = "0.2.153"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
+checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"
 
 [[package]]
-name = "libc"
-version = "0.2.142"
+name = "libm"
+version = "0.2.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6a987beff54b60ffa6d51982e1aa1146bc42f19bd26be28b0586f252fccf5317"
+checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4"
 
 [[package]]
 name = "motoko-rts"
@@ -81,7 +64,6 @@ version = "0.1.0"
 dependencies = [
  "byteorder",
  "fxhash",
- "libc",
  "motoko-rts",
  "motoko-rts-macros",
  "oorandom",
@@ -95,6 +77,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd"
 dependencies = [
  "autocfg",
+ "libm",
 ]
 
 [[package]]
@@ -111,41 +94,33 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
 
 [[package]]
 name = "proc-macro2"
-version = "1.0.56"
+version = "1.0.66"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435"
+checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9"
 dependencies = [
  "unicode-ident",
 ]
 
 [[package]]
 name = "proptest"
-version = "1.0.0"
+version = "1.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e0d9cc07f18492d879586c92b485def06bc850da3118075cd45d50e9c95b0e5"
+checksum = "4e35c06b98bf36aba164cc17cb25f7e232f5c4aeea73baa14b8a9f0d92dbfa65"
 dependencies = [
  "bitflags",
  "byteorder",
- "lazy_static",
  "num-traits",
- "quick-error",
  "rand",
  "rand_chacha",
  "rand_xorshift",
- "regex-syntax",
+ "unarray",
 ]
 
-[[package]]
-name = "quick-error"
-version = "2.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3"
-
 [[package]]
 name = "quote"
-version = "1.0.26"
+version = "1.0.31"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc"
+checksum = "5fe8a65d69dd0808184ebb5f836ab526bb259db23c657efa38711b1072ee47f0"
 dependencies = [
  "proc-macro2",
 ]
@@ -156,8 +131,6 @@ version = "0.8.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
 dependencies = [
- "libc",
- "rand_chacha",
  "rand_core",
 ]
 
@@ -176,9 +149,6 @@ name = "rand_core"
 version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
-dependencies = [
- "getrandom",
-]
 
 [[package]]
 name = "rand_xorshift"
@@ -189,12 +159,6 @@ dependencies = [
  "rand_core",
 ]
 
-[[package]]
-name = "regex-syntax"
-version = "0.6.29"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
-
 [[package]]
 name = "syn"
 version = "1.0.109"
@@ -207,13 +171,13 @@ dependencies = [
 ]
 
 [[package]]
-name = "unicode-ident"
-version = "1.0.8"
+name = "unarray"
+version = "0.1.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4"
+checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94"
 
 [[package]]
-name = "wasi"
-version = "0.11.0+wasi-snapshot-preview1"
+name = "unicode-ident"
+version = "1.0.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
+checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c"
diff --git a/rts/motoko-rts-tests/Cargo.toml b/rts/motoko-rts-tests/Cargo.toml
index 82558e9caf6..61147688f00 100644
--- a/rts/motoko-rts-tests/Cargo.toml
+++ b/rts/motoko-rts-tests/Cargo.toml
@@ -4,11 +4,16 @@ version = "0.1.0"
 authors = ["dfinity  {
+            println!("cargo:rustc-link-search=native=../_build");
+            println!("cargo:rustc-link-lib=static=tommath_wasm64");
+        }
+
         "wasm32-wasi" => {
             println!("cargo:rustc-link-search=native=../_build");
-            println!("cargo:rustc-link-lib=static=tommath");
+            println!("cargo:rustc-link-lib=static=tommath_wasm32");
         }
 
         "i686-unknown-linux-gnu" => {
diff --git a/rts/motoko-rts-tests/src/bigint.rs b/rts/motoko-rts-tests/src/bigint.rs
index 2bf93a7dde8..4180be6cd27 100644
--- a/rts/motoko-rts-tests/src/bigint.rs
+++ b/rts/motoko-rts-tests/src/bigint.rs
@@ -1,4 +1,5 @@
 use crate::memory::{initialize_test_memory, reset_test_memory, TestMemory};
+use std::ffi::c_void;
 
 use motoko_rts::bigint::{self, *};
 use motoko_rts::buf::Buf;
@@ -10,17 +11,21 @@ use motoko_rts::types::{Bytes, Value};
 // This global is used to pass a reference to heap to the mp functions
 static mut HEAP: *mut TestMemory = std::ptr::null_mut();
 
+pub unsafe fn set_bigint_heap(heap: *mut TestMemory) {
+    HEAP = heap;
+}
+
 #[no_mangle]
-unsafe extern "C" fn mp_calloc(n_elems: usize, elem_size: Bytes) -> *mut libc::c_void {
+unsafe extern "C" fn mp_calloc(n_elems: usize, elem_size: Bytes) -> *mut c_void {
     bigint::mp_calloc(&mut *HEAP, n_elems, elem_size)
 }
 
 #[no_mangle]
 unsafe extern "C" fn mp_realloc(
-    ptr: *mut libc::c_void,
-    old_size: Bytes,
-    new_size: Bytes,
-) -> *mut libc::c_void {
+    ptr: *mut c_void,
+    old_size: Bytes,
+    new_size: Bytes,
+) -> *mut c_void {
     bigint::mp_realloc(&mut *HEAP, ptr, old_size, new_size)
 }
 
@@ -28,13 +33,13 @@ pub unsafe fn test() {
     println!("Testing BigInt ...");
 
     let mut heap = initialize_test_memory();
-    HEAP = &mut heap;
+    set_bigint_heap(&mut heap);
 
     assert!(bigint_eq(
-        bigint_pow(bigint_of_word32(70), bigint_of_word32(32)),
+        bigint_pow(bigint_of_word64(70), bigint_of_word64(32)),
         bigint_mul(
-            bigint_pow(bigint_of_word32(70), bigint_of_word32(31)),
-            bigint_of_word32(70)
+            bigint_pow(bigint_of_word64(70), bigint_of_word64(31)),
+            bigint_of_word64(70)
         )
     ));
 
@@ -42,10 +47,10 @@ pub unsafe fn test() {
     // (s)leb128 encoding
     //
 
-    let one = bigint_of_word32(1);
-    let two = bigint_of_word32(2);
+    let one = bigint_of_word64(1);
+    let two = bigint_of_word64(2);
     for i in 0..100 {
-        let two_pow_i = bigint_pow(two, bigint_of_word32(i));
+        let two_pow_i = bigint_pow(two, bigint_of_word64(i));
         let minus_one = bigint_sub(two_pow_i, one);
         let plus_one = bigint_add(two_pow_i, one);
 
@@ -61,7 +66,7 @@ pub unsafe fn test() {
         test_bigint_sleb128(bigint_neg(plus_one));
     }
 
-    HEAP = std::ptr::null_mut();
+    set_bigint_heap(std::ptr::null_mut());
     reset_test_memory();
     drop(heap);
 }
diff --git a/rts/motoko-rts-tests/src/bitrel.rs b/rts/motoko-rts-tests/src/bitrel.rs
index a6d650accb0..b79e96d6de4 100644
--- a/rts/motoko-rts-tests/src/bitrel.rs
+++ b/rts/motoko-rts-tests/src/bitrel.rs
@@ -1,21 +1,29 @@
 use motoko_rts::bitrel::BitRel;
+use motoko_rts_macros::uses_enhanced_orthogonal_persistence;
 
 pub unsafe fn test() {
     println!("Testing bitrel ...");
 
-    const K: u32 = 128;
+    const K: usize = 48;
 
-    const N: usize = (2 * K * K * 2 / usize::BITS) as usize;
+    const N: usize = 2 * K * K * 2 / usize::BITS as usize;
 
-    let mut cache: [u32; N] = [0xFFFFFFF; N];
+    let mut cache: [usize; N] = [0xFFFFFFF; N];
 
-    assert_eq!(usize::BITS, 32);
+    assert_eq!(
+        usize::BITS,
+        if uses_enhanced_orthogonal_persistence!() {
+            64
+        } else {
+            32
+        }
+    );
     for size1 in 0..K {
         for size2 in 0..K {
             let w = BitRel::words(size1, size2);
             let bitrel = BitRel {
                 ptr: &mut cache[0],
-                end: &mut cache[w as usize],
+                end: &mut cache[w],
                 size1: size1,
                 size2: size2,
             };
diff --git a/rts/motoko-rts-tests/src/continuation_table.rs b/rts/motoko-rts-tests/src/continuation_table.rs
index 9021e1312b4..9f83fa28f64 100644
--- a/rts/motoko-rts-tests/src/continuation_table.rs
+++ b/rts/motoko-rts-tests/src/continuation_table.rs
@@ -19,21 +19,21 @@ pub unsafe fn test() {
 
     let pointers: [Value; N] = from_fn(|_| alloc_blob(&mut heap, TAG_BLOB_B, Bytes(0)));
 
-    let mut references: [u32; N] = [0; N];
+    let mut references: [usize; N] = [0; N];
     for i in 0..N {
         references[i] = remember_continuation(&mut heap, pointers[i]);
-        assert_eq!(continuation_count(), (i + 1) as u32);
+        assert_eq!(continuation_count(), i + 1);
     }
 
     for i in 0..N / 2 {
         let c = recall_continuation(&mut heap, references[i]);
         assert_eq!(c.get_ptr(), pointers[i].get_ptr());
-        assert_eq!(continuation_count(), (N - i - 1) as u32);
+        assert_eq!(continuation_count(), N - i - 1);
     }
 
     for i in 0..N / 2 {
         references[i] = remember_continuation(&mut heap, pointers[i]);
-        assert_eq!(continuation_count(), (N / 2 + i + 1) as u32);
+        assert_eq!(continuation_count(), N / 2 + i + 1);
     }
 
     for i in (0..N).rev() {
@@ -41,7 +41,7 @@ pub unsafe fn test() {
             recall_continuation(&mut heap, references[i]).get_ptr(),
             pointers[i].get_ptr(),
         );
-        assert_eq!(continuation_count(), i as u32);
+        assert_eq!(continuation_count(), i);
     }
 
     reset_test_memory();
diff --git a/rts/motoko-rts-tests/src/gc.rs b/rts/motoko-rts-tests/src/gc.rs
index c1bc94ff821..bd6c9b57b86 100644
--- a/rts/motoko-rts-tests/src/gc.rs
+++ b/rts/motoko-rts-tests/src/gc.rs
@@ -5,26 +5,37 @@
 //
 // To convert an offset into an address, add heap array's address to the offset.
 
-mod heap;
+use motoko_rts_macros::{
+    classical_persistence, enhanced_orthogonal_persistence, incremental_gc, non_incremental_gc,
+};
+
+#[classical_persistence]
+mod classical;
+#[enhanced_orthogonal_persistence]
+mod enhanced;
+
+#[non_incremental_gc]
+mod compacting;
+#[non_incremental_gc]
+mod generational;
+#[incremental_gc]
 mod incremental;
-mod random;
-mod utils;
 
-use heap::MotokoHeap;
-use utils::{get_scalar_value, make_pointer, read_word, unskew_pointer, ObjectIdx, WORD_SIZE};
-
-use motoko_rts::types::*;
-
-use std::fmt::Write;
+pub mod heap;
+pub mod random;
+pub mod utils;
 
+use self::utils::{GC, GC_IMPLS};
 use fxhash::{FxHashMap, FxHashSet};
+use heap::MotokoHeap;
+use utils::ObjectIdx;
 
 pub fn test() {
     println!("Testing garbage collection ...");
 
     println!("  Testing pre-defined heaps...");
     for test_heap in test_heaps() {
-        test_gcs(&test_heap);
+        run_gc_tests(&test_heap);
     }
 
     println!("  Testing random heaps...");
@@ -39,6 +50,13 @@ pub fn test() {
     test_gc_components();
 }
 
+#[non_incremental_gc]
+fn test_gc_components() {
+    compacting::test();
+    generational::test();
+}
+
+#[incremental_gc]
 fn test_gc_components() {
     incremental::test();
 }
@@ -77,43 +95,52 @@ fn test_heaps() -> Vec<TestHeap> {
     ]
 }
 
-fn test_random_heap(seed: u64, max_objects: u32) {
+fn test_random_heap(seed: u64, max_objects: usize) {
     let random_heap = random::generate(seed, max_objects);
-    test_gcs(&random_heap);
+    run_gc_tests(&random_heap);
 }
 
 // All fields are vectors to preserve ordering. Objects are allocated/ added to root arrays etc. in
 // the same order they appear in these vectors. Each object in `heap` should have a unique index,
 // which is checked when creating the heap.
 #[derive(Debug)]
-struct TestHeap {
-    heap: Vec<(ObjectIdx, Vec)>,
-    roots: Vec,
-    continuation_table: Vec,
+pub struct TestHeap {
+    pub heap: Vec<(ObjectIdx, Vec)>,
+    pub roots: Vec,
+    pub continuation_table: Vec,
+}
+
+impl TestHeap {
+    pub fn build(&self, gc: GC, free_space: usize) -> MotokoHeap {
+        MotokoHeap::new(
+            &self.heap,
+            &self.roots,
+            &self.continuation_table,
+            gc,
+            free_space,
+        )
+    }
 }
 
 /// Test all GC implementations with the given heap
-fn test_gcs(heap_descr: &TestHeap) {
-    test_gc(
-        &heap_descr.heap,
-        &heap_descr.roots,
-        &heap_descr.continuation_table,
-    );
+fn run_gc_tests(test_heap: &TestHeap) {
+    for gc in &GC_IMPLS {
+        test_gc(*gc, test_heap);
+    }
     reset_gc();
 }
 
-fn test_gc(
-    refs: &[(ObjectIdx, Vec<ObjectIdx>)],
-    roots: &[ObjectIdx],
-    continuation_table: &[ObjectIdx],
-) {
-    let mut heap = MotokoHeap::new(refs, roots, continuation_table);
+fn test_gc(gc: GC, test_heap: &TestHeap) {
+    let mut heap = test_heap.build(gc, 0);
+    let refs = &test_heap.heap;
+    let roots = &test_heap.roots;
+    let continuation_table = &test_heap.continuation_table;
 
     initialize_gc(&mut heap);
 
     // Check `create_dynamic_heap` sanity
     check_dynamic_heap(
-        false, // before gc
+        CheckMode::Reachability,
         refs,
         roots,
         continuation_table,
@@ -125,8 +152,8 @@ fn test_gc(
         heap.region0_pointer_variable_offset(),
     );
 
-    for _ in 0..3 {
-        let check_all_reclaimed = run(&mut heap);
+    for round in 0..3 {
+        let check_all_reclaimed = gc.run(&mut heap, round);
 
         let heap_base_offset = heap.heap_base_offset();
         let heap_ptr_offset = heap.heap_ptr_offset();
@@ -134,7 +161,11 @@ fn test_gc(
         let continuation_table_variable_offset = heap.continuation_table_variable_offset();
         let region0_ptr_offset = heap.region0_pointer_variable_offset();
         check_dynamic_heap(
-            check_all_reclaimed, // check for unreachable objects
+            if check_all_reclaimed {
+                CheckMode::AllReclaimed
+            } else {
+                CheckMode::Reachability
+            },
             refs,
             roots,
             continuation_table,
@@ -148,24 +179,32 @@ fn test_gc(
     }
 }
 
+#[non_incremental_gc]
+fn initialize_gc(_heap: &mut MotokoHeap) {}
+
+#[incremental_gc]
 fn initialize_gc(heap: &mut MotokoHeap) {
     use motoko_rts::gc::incremental::{
         get_partitioned_heap, set_incremental_gc_state, IncrementalGC,
     };
+    use motoko_rts::types::Bytes;
     unsafe {
-        let state = IncrementalGC::initial_gc_state(heap, heap.heap_base_address());
+        let state = IncrementalGC::<MotokoHeap>::initial_gc_state(heap.heap_base_address());
         set_incremental_gc_state(Some(state));
         let allocation_size = heap.heap_ptr_address() - heap.heap_base_address();
 
         // Synchronize the partitioned heap with one big combined allocation by starting from the base pointer as the heap pointer.
-        let result =
-            get_partitioned_heap().allocate(heap, Bytes(allocation_size as u32).to_words());
+        let result = get_partitioned_heap().allocate(heap, Bytes(allocation_size).to_words());
         // Check that the heap pointer (here equals base pointer) is unchanged, i.e. no partition switch has happened.
         // This is a restriction in the unit test where `MotokoHeap` only supports contiguous bump allocation during initialization.
         assert_eq!(result.get_ptr(), heap.heap_base_address());
     }
 }
 
+#[non_incremental_gc]
+fn reset_gc() {}
+
+#[incremental_gc]
 fn reset_gc() {
     use motoko_rts::gc::incremental::set_incremental_gc_state;
     unsafe {
@@ -173,210 +212,69 @@ fn reset_gc() {
     }
 }
 
-/// Check the dynamic heap:
-///
-/// - All (and in post-gc mode, only) reachable objects should be in the heap. Reachable objects
-///   are those in the transitive closure of roots.
-///
-/// - Objects should point to right objects. E.g. if object with index X points to objects with
-///   indices Y and Z in the `objects` map, it should point to objects with indices Y and Z on the
-///   heap.
-///
-fn check_dynamic_heap(
-    post_gc: bool,
+#[derive(Debug, PartialEq)]
+pub enum CheckMode {
+    /// Check reachability of all necessary objects.
+    Reachability,
+    /// Check the reachability of all necessary objects and
+    /// that all garbage objects have been reclaimed.
+    AllReclaimed,
+    /// Check valid dynamic heap after stabilization.
+    #[cfg(feature = "enhanced_orthogonal_persistence")]
+    Stabilzation,
+}
+
+#[classical_persistence]
+pub fn check_dynamic_heap(
+    mode: CheckMode,
     objects: &[(ObjectIdx, Vec<ObjectIdx>)],
     roots: &[ObjectIdx],
     continuation_table: &[ObjectIdx],
     heap: &[u8],
     heap_base_offset: usize,
     heap_ptr_offset: usize,
-    static_root_array_variable_offset: usize,
+    _static_root_array_variable_offset: usize,
     continuation_table_variable_offset: usize,
     region0_ptr_offset: usize,
 ) {
-    let objects_map: FxHashMap<ObjectIdx, &[ObjectIdx]> = objects
-        .iter()
-        .map(|(obj, refs)| (*obj, refs.as_slice()))
-        .collect();
-
-    // Current offset in the heap
-    let mut offset = heap_base_offset;
-
-    // Maps objects to their addresses (not offsets!). Used when debugging duplicate objects.
-    let mut seen: FxHashMap<ObjectIdx, usize> = Default::default();
-
-    let static_root_array_address =
-        unskew_pointer(read_word(heap, static_root_array_variable_offset));
-    let static_root_array_offset = static_root_array_address as usize - heap.as_ptr() as usize;
-
-    let continuation_table_address =
-        unskew_pointer(read_word(heap, continuation_table_variable_offset));
-    let continuation_table_offset = continuation_table_address as usize - heap.as_ptr() as usize;
-
-    let region0_addr = unskew_pointer(read_word(heap, region0_ptr_offset));
-
-    while offset < heap_ptr_offset {
-        let object_offset = offset;
-
-        // Address of the current object. Used for debugging.
-        let address = offset as usize + heap.as_ptr() as usize;
-
-        if object_offset == static_root_array_offset {
-            check_static_root_array(object_offset, roots, heap);
-            offset += (size_of::<Array>() + Words(roots.len() as u32))
-                .to_bytes()
-                .as_usize();
-            continue;
-        }
-
-        if object_offset == continuation_table_offset {
-            check_continuation_table(object_offset, continuation_table, heap);
-            offset += (size_of::<Array>() + Words(continuation_table.len() as u32))
-                .to_bytes()
-                .as_usize();
-            continue;
-        }
-
-        let tag = read_word(heap, offset);
-        offset += WORD_SIZE;
-
-        if tag == TAG_ONE_WORD_FILLER {
-        } else if tag == TAG_FREE_SPACE {
-            let words = read_word(heap, offset) as usize;
-            offset += WORD_SIZE;
-            offset += words * WORD_SIZE;
-        } else {
-            let forward;
-            forward = read_word(heap, offset);
-            offset += WORD_SIZE;
-
-            let is_forwarded = forward != make_pointer(address as u32);
-
-            if tag == TAG_MUTBOX {
-                // MutBoxes of static root array, will be scanned indirectly when checking the static root array.
-                offset += WORD_SIZE;
-            } else if tag == TAG_BLOB_B {
-                assert!(!is_forwarded);
-                // in-heap mark stack blobs
-                let length = read_word(heap, offset);
-                offset += WORD_SIZE + length as usize;
-            } else if tag == TAG_REGION {
-                if !is_forwarded {
-                    assert_eq!(address, region0_addr as usize);
-                }
-                offset += (size_of::() - size_of::())
-                    .to_bytes()
-                    .as_usize();
-            } else {
-                assert!(is_array_or_slice_tag(tag));
-
-                if is_forwarded {
-                    let forward_offset = forward as usize - heap.as_ptr() as usize;
-                    let length = read_word(
-                        heap,
-                        forward_offset + size_of::().to_bytes().as_usize(),
-                    );
-
-                    // Skip stale object version that has been relocated during incremental GC.
-                    offset += length as usize * WORD_SIZE;
-                } else {
-                    let n_fields = read_word(heap, offset);
-                    offset += WORD_SIZE;
-
-                    // There should be at least one field for the index
-                    assert!(n_fields >= 1);
-
-                    let object_idx = get_scalar_value(read_word(heap, offset));
-                    offset += WORD_SIZE;
-
-                    let old = seen.insert(object_idx, address);
-                    if let Some(old) = old {
-                        panic!(
-                            "Object with index {} seen multiple times: {:#x}, {:#x}",
-                            object_idx, old, address
-                        );
-                    }
-
-                    let object_expected_pointees =
-                        objects_map.get(&object_idx).unwrap_or_else(|| {
-                            panic!("Object with index {} is not in the objects map", object_idx)
-                        });
-
-                    for field_idx in 1..n_fields {
-                        let field = read_word(heap, offset);
-                        offset += WORD_SIZE;
-
-                        // Get index of the object pointed by the field
-                        let pointee_address = field.wrapping_add(1); // unskew
-
-                        let pointee_idx = read_object_id(pointee_address, heap);
-                        let expected_pointee_idx =
-                            object_expected_pointees[(field_idx - 1) as usize];
-                        assert_eq!(
-                            pointee_idx,
-                            expected_pointee_idx,
-                            "Object with index {} points to {} in field {}, but expected to point to {}",
-                            object_idx,
-                            pointee_idx,
-                            field_idx - 1,
-                            expected_pointee_idx,
-                        );
-                    }
-                }
-            }
-        }
-    }
-
-    // At this point we've checked that all seen objects point to the expected objects (as
-    // specified by `objects`). Check that we've seen the reachable objects and only the reachable
-    // objects.
-    let reachable_objects = compute_reachable_objects(roots, continuation_table, &objects_map);
-
-    // Objects we've seen in the heap
-    let seen_objects: FxHashSet<ObjectIdx> = seen.keys().copied().collect();
-
-    // Reachable objects that we haven't seen in the heap
-    let missing_objects: Vec<ObjectIdx> = reachable_objects
-        .difference(&seen_objects)
-        .copied()
-        .collect();
-
-    let mut error_message = String::new();
-
-    if !missing_objects.is_empty() {
-        write!(
-            &mut error_message,
-            "Reachable objects missing in the {} heap: {:?}",
-            if post_gc { "post-gc" } else { "pre-gc" },
-            missing_objects,
-        )
-        .unwrap();
-    }
-
-    if post_gc {
-        // Unreachable objects that we've seen in the heap
-        let extra_objects: Vec<ObjectIdx> = seen_objects
-            .difference(&reachable_objects)
-            .copied()
-            .collect();
-
-        if !extra_objects.is_empty() {
-            if !error_message.is_empty() {
-                error_message.push('\n');
-            }
-
-            write!(
-                &mut error_message,
-                "Unreachable objects seen in the post-GC heap: {:?}",
-                extra_objects,
-            )
-            .unwrap();
-        }
-    }
+    self::classical::check_dynamic_heap(
+        mode,
+        objects,
+        roots,
+        continuation_table,
+        heap,
+        heap_base_offset,
+        heap_ptr_offset,
+        continuation_table_variable_offset,
+        region0_ptr_offset,
+    );
+}
 
-    if !error_message.is_empty() {
-        panic!("{}", error_message);
-    }
+#[enhanced_orthogonal_persistence]
+pub fn check_dynamic_heap(
+    mode: CheckMode,
+    objects: &[(ObjectIdx, Vec<ObjectIdx>)],
+    roots: &[ObjectIdx],
+    continuation_table: &[ObjectIdx],
+    heap: &[u8],
+    heap_base_offset: usize,
+    heap_ptr_offset: usize,
+    static_root_array_variable_offset: usize,
+    continuation_table_variable_offset: usize,
+    region0_ptr_offset: usize,
+) {
+    self::enhanced::check_dynamic_heap(
+        mode,
+        objects,
+        roots,
+        continuation_table,
+        heap,
+        heap_base_offset,
+        heap_ptr_offset,
+        static_root_array_variable_offset,
+        continuation_table_variable_offset,
+        region0_ptr_offset,
+    );
 }
 
 fn compute_reachable_objects(
@@ -403,91 +301,3 @@ fn compute_reachable_objects(
 
     closure
 }
-
-fn check_static_root_array(mut offset: usize, roots: &[ObjectIdx], heap: &[u8]) {
-    let array_address = heap.as_ptr() as usize + offset;
-    assert_eq!(read_word(heap, offset), TAG_ARRAY_M);
-    offset += WORD_SIZE;
-
-    assert_eq!(read_word(heap, offset), make_pointer(array_address as u32));
-    offset += WORD_SIZE;
-
-    assert_eq!(read_word(heap, offset), roots.len() as u32);
-    offset += WORD_SIZE;
-
-    for obj in roots.iter() {
-        let mutbox_address = unskew_pointer(read_word(heap, offset));
-        offset += WORD_SIZE;
-
-        let object_address = unskew_pointer(read_mutbox_field(mutbox_address, heap));
-        let idx = read_object_id(object_address, heap);
-        assert_eq!(idx, *obj);
-    }
-}
-
-fn read_mutbox_field(mutbox_address: u32, heap: &[u8]) -> u32 {
-    let mut mutbox_offset = mutbox_address as usize - heap.as_ptr() as usize;
-
-    let mutbox_tag = read_word(heap, mutbox_offset);
-    assert_eq!(mutbox_tag, TAG_MUTBOX);
-    mutbox_offset += WORD_SIZE;
-
-    assert_eq!(read_word(heap, mutbox_offset), make_pointer(mutbox_address));
-    mutbox_offset += WORD_SIZE;
-
-    read_word(heap, mutbox_offset)
-}
-
-fn check_continuation_table(mut offset: usize, continuation_table: &[ObjectIdx], heap: &[u8]) {
-    let table_addr = heap.as_ptr() as usize + offset;
-    assert_eq!(read_word(heap, offset), TAG_ARRAY_M);
-    offset += WORD_SIZE;
-
-    assert_eq!(read_word(heap, offset), make_pointer(table_addr as u32));
-    offset += WORD_SIZE;
-
-    assert_eq!(read_word(heap, offset), continuation_table.len() as u32);
-    offset += WORD_SIZE;
-
-    for obj in continuation_table.iter() {
-        let ptr = unskew_pointer(read_word(heap, offset));
-        offset += WORD_SIZE;
-
-        let idx = read_object_id(ptr, heap);
-        assert_eq!(idx, *obj);
-    }
-}
-
-fn read_object_id(object_address: u32, heap: &[u8]) -> ObjectIdx {
-    let tag = read_word(heap, object_address as usize - heap.as_ptr() as usize);
-    assert!(is_array_or_slice_tag(tag));
-
-    // Skip object header for idx
-    let idx_address = object_address as usize + size_of::().to_bytes().as_usize();
-    get_scalar_value(read_word(heap, idx_address - heap.as_ptr() as usize))
-}
-
-fn run(heap: &mut MotokoHeap) -> bool {
-    let static_root = heap.static_root_array_variable_address() as *mut Value;
-    let continuation_table_location = heap.continuation_table_variable_address() as *mut Value;
-    let region0_pointer_location = heap.region0_pointer_variable_address() as *mut Value;
-    let unused_root = &mut Value::from_scalar(0) as *mut Value;
-
-    unsafe {
-        use motoko_rts::gc::incremental::{get_incremental_gc_state, IncrementalGC};
-        const INCREMENTS_UNTIL_COMPLETION: usize = 16;
-        for _ in 0..INCREMENTS_UNTIL_COMPLETION {
-            let roots = [
-                static_root,
-                continuation_table_location,
-                region0_pointer_location,
-                unused_root,
-                unused_root,
-                unused_root,
-            ];
-            IncrementalGC::instance(heap, get_incremental_gc_state())
-                .empty_call_stack_increment(roots);
-        }
-        false
-    }
-}
diff --git a/rts/motoko-rts-tests/src/gc/classical.rs b/rts/motoko-rts-tests/src/gc/classical.rs
new file mode 100644
index 00000000000..73bdf9f087f
--- /dev/null
+++ b/rts/motoko-rts-tests/src/gc/classical.rs
@@ -0,0 +1,358 @@
+use super::heap::MotokoHeap;
+use super::utils::{
+    get_scalar_value, make_pointer, read_word, unskew_pointer, ObjectIdx, GC, WORD_SIZE,
+};
+use crate::gc::{compute_reachable_objects, CheckMode};
+use fxhash::{FxHashMap, FxHashSet};
+use motoko_rts::types::*;
+use motoko_rts_macros::{incremental_gc, is_incremental_gc, non_incremental_gc};
+use std::fmt::Write;
+
+impl GC {
+    #[non_incremental_gc]
+    pub fn run(&self, heap: &mut MotokoHeap, _round: usize) -> bool {
+        let heap_base = heap.heap_base_address();
+        let static_roots = Value::from_ptr(heap.static_root_array_variable_address());
+        let mut region_0 = Value::from_scalar(0);
+        let continuation_table_ptr_address =
+            heap.continuation_table_variable_address() as *mut Value;
+
+        let heap_1 = heap.clone();
+        let heap_2 = heap.clone();
+
+        match self {
+            GC::Copying => {
+                unsafe {
+                    motoko_rts::gc::copying::copying_gc_internal(
+                        heap,
+                        heap_base,
+                        // get_hp
+                        || heap_1.heap_ptr_address(),
+                        // set_hp
+                        move |hp| heap_2.set_heap_ptr_address(hp),
+                        static_roots,
+                        continuation_table_ptr_address,
+                        &mut region_0,
+                        // note_live_size
+                        |_live_size| {},
+                        // note_reclaimed
+                        |_reclaimed| {},
+                    );
+                }
+                true
+            }
+
+            GC::MarkCompact => {
+                unsafe {
+                    motoko_rts::gc::mark_compact::compacting_gc_internal(
+                        heap,
+                        heap_base,
+                        // get_hp
+                        || heap_1.heap_ptr_address(),
+                        // set_hp
+                        move |hp| heap_2.set_heap_ptr_address(hp),
+                        static_roots,
+                        continuation_table_ptr_address,
+                        &mut region_0,
+                        // note_live_size
+                        |_live_size| {},
+                        // note_reclaimed
+                        |_reclaimed| {},
+                    );
+                }
+                true
+            }
+
+            GC::Generational => {
+                use motoko_rts::gc::{
+                    generational::{
+                        write_barrier::{LAST_HP, REMEMBERED_SET},
+                        GenerationalGC, Strategy,
+                    },
+                    remembered_set::RememberedSet,
+                };
+
+                let strategy = match _round {
+                    0 => Strategy::Young,
+                    _ => Strategy::Full,
+                };
+                unsafe {
+                    REMEMBERED_SET = Some(RememberedSet::new(heap));
+                    LAST_HP = heap_1.last_ptr_address();
+
+                    let limits = motoko_rts::gc::generational::Limits {
+                        base: heap_base,
+                        last_free: heap_1.last_ptr_address(),
+                        free: heap_1.heap_ptr_address(),
+                    };
+                    let roots = motoko_rts::gc::generational::Roots {
+                        static_roots,
+                        continuation_table_ptr_loc: continuation_table_ptr_address,
+                        region0_ptr_loc: &mut region_0,
+                    };
+                    let gc_heap = motoko_rts::gc::generational::Heap {
+                        mem: heap,
+                        limits,
+                        roots,
+                    };
+                    let mut gc = GenerationalGC::new(gc_heap, strategy);
+                    gc.run();
+                    let free = gc.heap.limits.free;
+                    heap.set_last_ptr_address(free);
+                    heap.set_heap_ptr_address(free);
+                }
+                _round >= 2
+            }
+        }
+    }
+
+    #[incremental_gc]
+    pub fn run(&self, heap: &mut MotokoHeap, _round: usize) -> bool {
+        let static_roots = Value::from_ptr(heap.static_root_array_variable_address());
+        let continuation_table_ptr_address =
+            heap.continuation_table_variable_address() as *mut Value;
+        let region0_ptr_address = heap.region0_pointer_variable_address() as *mut Value;
+
+        match self {
+            GC::Incremental => unsafe {
+                use motoko_rts::gc::incremental::{get_incremental_gc_state, IncrementalGC};
+                const INCREMENTS_UNTIL_COMPLETION: usize = 16;
+                for _ in 0..INCREMENTS_UNTIL_COMPLETION {
+                    let roots = motoko_rts::gc::incremental::roots::Roots {
+                        static_roots,
+                        continuation_table_location: continuation_table_ptr_address,
+                        region0_ptr_location: region0_ptr_address,
+                    };
+                    IncrementalGC::instance(heap, get_incremental_gc_state())
+                        .empty_call_stack_increment(roots);
+                }
+                false
+            },
+        }
+    }
+}
+
+/// Check the dynamic heap:
+///
+/// - All (and in post-gc mode, only) reachable objects should be in the heap. Reachable objects
+///   are those in the transitive closure of roots.
+///
+/// - Objects should point to right objects. E.g. if object with index X points to objects with
+///   indices Y and Z in the `objects` map, it should point to objects with indices Y and Z on the
+///   heap.
+///
+pub fn check_dynamic_heap(
+    mode: CheckMode,
+    objects: &[(ObjectIdx, Vec<ObjectIdx>)],
+    roots: &[ObjectIdx],
+    continuation_table: &[ObjectIdx],
+    heap: &[u8],
+    heap_base_offset: usize,
+    heap_ptr_offset: usize,
+    continuation_table_ptr_offset: usize,
+    region0_ptr_offset: usize,
+) {
+    let incremental = cfg!(feature = "incremental_gc");
+    let objects_map: FxHashMap<ObjectIdx, &[ObjectIdx]> = objects
+        .iter()
+        .map(|(obj, refs)| (*obj, refs.as_slice()))
+        .collect();
+
+    // Current offset in the heap
+    let mut offset = heap_base_offset;
+
+    // Maps objects to their addresses (not offsets!). Used when debugging duplicate objects.
+    let mut seen: FxHashMap<ObjectIdx, usize> = Default::default();
+
+    let continuation_table_addr = unskew_pointer(read_word(heap, continuation_table_ptr_offset));
+    let continuation_table_offset = continuation_table_addr - heap.as_ptr() as usize;
+
+    let region0_addr = unskew_pointer(read_word(heap, region0_ptr_offset));
+
+    while offset < heap_ptr_offset {
+        let object_offset = offset;
+
+        // Address of the current object. Used for debugging.
+        let address = offset + heap.as_ptr() as usize;
+
+        if object_offset == continuation_table_offset {
+            check_continuation_table(object_offset, continuation_table, heap);
+            offset += (size_of::<Array>() + Words(continuation_table.len()))
+                .to_bytes()
+                .as_usize();
+            continue;
+        }
+
+        let tag = read_word(heap, offset);
+        offset += WORD_SIZE;
+
+        if tag == TAG_ONE_WORD_FILLER {
+            assert!(incremental);
+        } else if tag == TAG_FREE_SPACE {
+            assert!(incremental);
+            let words = read_word(heap, offset);
+            offset += WORD_SIZE;
+            offset += words * WORD_SIZE;
+        } else {
+            let forward;
+            if incremental {
+                forward = read_word(heap, offset);
+                offset += WORD_SIZE;
+            } else {
+                forward = make_pointer(address);
+            }
+
+            let is_forwarded = forward != make_pointer(address);
+
+            if incremental && tag == TAG_BLOB_B {
+                assert!(!is_forwarded);
+                // in-heap mark stack blobs
+                let length = read_word(heap, offset);
+                offset += WORD_SIZE + length;
+            } else if tag == TAG_REGION {
+                if !is_forwarded {
+                    assert_eq!(address, region0_addr);
+                }
+                offset += (size_of::() - size_of::())
+                    .to_bytes()
+                    .as_usize();
+            } else {
+                if incremental {
+                    assert!(is_array_or_slice_tag(tag));
+                } else {
+                    assert!(is_base_array_tag(tag));
+                }
+
+                if is_forwarded {
+                    assert!(incremental);
+
+                    let forward_offset = forward - heap.as_ptr() as usize;
+                    let length = read_word(
+                        heap,
+                        forward_offset + size_of::().to_bytes().as_usize(),
+                    );
+
+                    // Skip stale object version that has been relocated during incremental GC.
+                    offset += length * WORD_SIZE;
+                } else {
+                    let n_fields = read_word(heap, offset);
+                    offset += WORD_SIZE;
+
+                    // There should be at least one field for the index
+                    assert!(n_fields >= 1);
+
+                    let object_idx = get_scalar_value(read_word(heap, offset));
+                    offset += WORD_SIZE;
+
+                    let old = seen.insert(object_idx, address);
+                    if let Some(old) = old {
+                        panic!(
+                            "Object with index {} seen multiple times: {:#x}, {:#x}",
+                            object_idx, old, address
+                        );
+                    }
+
+                    let object_expected_pointees =
+                        objects_map.get(&object_idx).unwrap_or_else(|| {
+                            panic!("Object with index {} is not in the objects map", object_idx)
+                        });
+
+                    for field_idx in 1..n_fields {
+                        let field = read_word(heap, offset);
+                        offset += WORD_SIZE;
+                        // Get index of the object pointed by the field
+                        let pointee_address = field.wrapping_add(1); // unskew
+                        let pointee_offset = pointee_address - heap.as_ptr() as usize;
+                        let pointee_idx_offset =
+                            pointee_offset + size_of::<Array>().to_bytes().as_usize(); // skip array header (incl. length)
+                        let pointee_idx = get_scalar_value(read_word(heap, pointee_idx_offset));
+                        let expected_pointee_idx = object_expected_pointees[field_idx - 1];
+                        assert_eq!(
+                            pointee_idx,
+                            expected_pointee_idx,
+                            "Object with index {} points to {} in field {}, but expected to point to {}",
+                            object_idx,
+                            pointee_idx,
+                            field_idx - 1,
+                            expected_pointee_idx,
+                        );
+                    }
+                }
+            }
+        }
+    }
+
+    // At this point we've checked that all seen objects point to the expected objects (as
+    // specified by `objects`). Check that we've seen the reachable objects and only the reachable
+    // objects.
+    let reachable_objects = compute_reachable_objects(roots, continuation_table, &objects_map);
+
+    // Objects we've seen in the heap
+    let seen_objects: FxHashSet<ObjectIdx> = seen.keys().copied().collect();
+
+    // Reachable objects that we haven't seen in the heap
+    let missing_objects: Vec<ObjectIdx> = reachable_objects
+        .difference(&seen_objects)
+        .copied()
+        .collect();
+
+    let mut error_message = String::new();
+
+    if !missing_objects.is_empty() {
+        write!(
+            &mut error_message,
+            "Reachable objects missing in the heap: {missing_objects:?}",
+        )
+        .unwrap();
+    }
+
+    if mode == CheckMode::AllReclaimed {
+        // Unreachable objects that we've seen in the heap
+        let extra_objects: Vec<ObjectIdx> = seen_objects
+            .difference(&reachable_objects)
+            .copied()
+            .collect();
+
+        if !extra_objects.is_empty() {
+            if !error_message.is_empty() {
+                error_message.push('\n');
+            }
+
+            write!(
+                &mut error_message,
+                "Unreachable objects seen in the post-GC heap: {:?}",
+                extra_objects,
+            )
+            .unwrap();
+        }
+    }
+
+    if !error_message.is_empty() {
+        panic!("{}", error_message);
+    }
+}
+
+fn check_continuation_table(mut offset: usize, continuation_table: &[ObjectIdx], heap: &[u8]) {
+    let table_addr = heap.as_ptr() as usize + offset;
+    assert_eq!(read_word(heap, offset), TAG_ARRAY_M);
+    offset += WORD_SIZE;
+
+    if is_incremental_gc!() {
+        assert_eq!(read_word(heap, offset), make_pointer(table_addr));
+        offset += WORD_SIZE;
+    }
+
+    assert_eq!(read_word(heap, offset), continuation_table.len());
+    offset += WORD_SIZE;
+
+    for obj in continuation_table.iter() {
+        let ptr = unskew_pointer(read_word(heap, offset));
+        offset += WORD_SIZE;
+
+        // Skip object header for idx
+        let idx_address = ptr + size_of::<Array>().to_bytes().as_usize();
+        let idx = get_scalar_value(read_word(heap, idx_address - heap.as_ptr() as usize));
+
+        assert_eq!(idx, *obj);
+    }
+}
diff --git a/rts/motoko-rts-tests/src/gc/compacting.rs b/rts/motoko-rts-tests/src/gc/compacting.rs
new file mode 100644
index 00000000000..42cfa936cd2
--- /dev/null
+++ b/rts/motoko-rts-tests/src/gc/compacting.rs
@@ -0,0 +1,10 @@
+mod bitmap;
+mod mark_stack;
+
+pub fn test() {
+    println!("Testing compacting GC components ...");
+    unsafe {
+        bitmap::test();
+        mark_stack::test();
+    }
+}
diff --git a/rts/motoko-rts-tests/src/gc/compacting/bitmap.rs b/rts/motoko-rts-tests/src/gc/compacting/bitmap.rs
new file mode 100644
index 00000000000..237c047ad9c
--- /dev/null
+++ b/rts/motoko-rts-tests/src/gc/compacting/bitmap.rs
@@ -0,0 +1,166 @@
+use crate::memory::TestMemory;
+
+use motoko_rts::constants::WORD_SIZE;
+use motoko_rts::gc::mark_compact::bitmap::{
+    alloc_bitmap, free_bitmap, get_bit, iter_bits, set_bit, BITMAP_ITER_END,
+};
+use motoko_rts::memory::Memory;
+use motoko_rts::types::{Bytes, Words};
+
+use std::collections::HashSet;
+
+use proptest::strategy::Strategy;
+use proptest::test_runner::{Config, TestCaseError, TestCaseResult, TestRunner};
+
+pub unsafe fn test() {
+    println!("  Testing bitmap ...");
+    println!("    Testing set_bit/get_bit");
+
+    {
+        let mut mem = TestMemory::new(Words(1024));
+        test_set_get(&mut mem, vec![0, 33]).unwrap();
+    }
+
+    let mut proptest_runner = TestRunner::new(Config {
+        cases: 100,
+        failure_persistence: None,
+        ..Default::default()
+    });
+
+    proptest_runner
+        .run(&bit_index_vector_strategy(), |bits| {
+            // Max bit idx = 65,534, requires 2048 words. Add 2 words for Blob header (header +
+            // length).
+            let mut mem = TestMemory::new(Words(2051));
+            test_set_get_proptest(&mut mem, bits)
+        })
+        .unwrap();
+
+    println!("    Testing bit iteration");
+    proptest_runner
+        .run(&bit_index_vector_strategy(), |bits| {
+            let mut hash_set = HashSet::new();
+            for value in bits {
+                hash_set.insert(value);
+            }
+            let mut mem = TestMemory::new(Words(2051));
+            test_bit_iter(&mut mem, hash_set)
+        })
+        .unwrap();
+}
+
+/// Generates vectors of bit indices
+fn bit_index_vector_strategy() -> impl Strategy<Value = Vec<u16>> {
+    proptest::collection::vec(0u16..u16::MAX, 0..1_000)
+}
+
+fn test_set_get_proptest<M: Memory>(mem: &mut M, bits: Vec<u16>) -> TestCaseResult {
+    test_set_get(mem, bits).map_err(|err| TestCaseError::Fail(err.into()))
+}
+
+fn test_set_get<M: Memory>(mem: &mut M, mut bits: Vec<u16>) -> Result<(), String> {
+    if bits.is_empty() {
+        return Ok(());
+    }
+
+    unsafe {
+        alloc_bitmap(
+            mem,
+            Bytes((*bits.iter().max().unwrap() as usize + 1) * WORD_SIZE),
+            0,
+        );
+
+        for bit in &bits {
+            set_bit(*bit as usize);
+            if !get_bit(*bit as usize) {
+                return Err("set-get error".to_string());
+            }
+        }
+
+        bits.sort();
+
+        let mut last_bit: Option<u16> = None;
+        for bit in bits {
+            // Bits from the last set bit up to current bit should be 0
+            if let Some(last_bit) = last_bit {
+                for i in last_bit + 1..bit {
+                    if get_bit(i as usize) {
+                        return Err(format!("get_bit({}) of unset bit is true", i));
+                    }
+                }
+            }
+
+            // Current bit should be set
+            if !get_bit(bit as usize) {
+                return Err("get_bit of set bit is false".to_string());
+            }
+
+            last_bit = Some(bit);
+        }
+
+        free_bitmap()
+    }
+
+    Ok(())
+}
+
+fn test_bit_iter<M: Memory>(mem: &mut M, bits: HashSet<u16>) -> TestCaseResult {
+    // If the max bit is N, the heap size is at least N+1 words
+    let heap_size = Words(
+        bits.iter()
+            .max()
+            .map(|max_bit| *max_bit as usize + 1)
+            .unwrap_or(0),
+    )
+    .to_bytes();
+
+    unsafe {
+        alloc_bitmap(mem, heap_size, 0);
+
+        for bit in bits.iter() {
+            set_bit(*bit as usize);
+        }
+
+        let mut bits_sorted = bits.into_iter().collect::<Vec<u16>>();
+        bits_sorted.sort();
+
+        let mut bit_vec_iter = bits_sorted.into_iter();
+        let mut bit_map_iter = iter_bits();
+
+        while let Some(vec_bit) = bit_vec_iter.next() {
+            match bit_map_iter.next() {
+                BITMAP_ITER_END => {
+                    return Err(TestCaseError::Fail(
+                        "bitmap iterator didn't yield but there are more bits".into(),
+                    ));
+                }
+                map_bit => {
+                    if map_bit != vec_bit as usize {
+                        return Err(TestCaseError::Fail(
+                            format!(
+                                "bitmap iterator yields {}, but actual bit is {}",
+                                map_bit, vec_bit
+                            )
+                            .into(),
+                        ));
+                    }
+                }
+            }
+        }
+
+        let map_bit = bit_map_iter.next();
+        if map_bit != BITMAP_ITER_END {
+            return Err(TestCaseError::Fail(
+                format!(
+                    "bitmap iterator yields {}, but there are no more bits left",
+                    map_bit
+                )
+                .into(),
+            ));
+        }
+
+        free_bitmap()
+    }
+
+    Ok(())
+}
diff --git a/rts/motoko-rts-tests/src/gc/compacting/mark_stack.rs b/rts/motoko-rts-tests/src/gc/compacting/mark_stack.rs
new file mode 100644
index 00000000000..756fcb9d99e
--- /dev/null
+++ b/rts/motoko-rts-tests/src/gc/compacting/mark_stack.rs
@@ -0,0 +1,115 @@
+use crate::memory::TestMemory;
+
+use motoko_rts::gc::mark_compact::mark_stack::{
+    alloc_mark_stack, free_mark_stack, grow_stack, pop_mark_stack, push_mark_stack,
+    INIT_STACK_SIZE, STACK_BASE, STACK_PTR, STACK_TOP,
+};
+use motoko_rts::memory::Memory;
+use motoko_rts::types::*;
+
+use proptest::test_runner::{Config, TestCaseError, TestCaseResult, TestRunner};
+
+pub unsafe fn test() {
+    println!("  Testing mark stack ...");
+
+    test_push_pop();
+    test_grow_stack();
+}
+
+fn test_push_pop() {
+    println!("    Testing push/pop");
+
+    let mut proptest_runner = TestRunner::new(Config {
+        cases: 100,
+        failure_persistence: None,
+        ..Default::default()
+    });
+
+    proptest_runner
+        .run(&(0u32..1000u32), |n_objs| {
+            let mut mem = TestMemory::new(Words(1024 * 1024));
+            test_(&mut mem, n_objs)
+        })
+        .unwrap();
+}
+
+static TAGS: [Tag; 23] = [
+    TAG_OBJECT,
+    TAG_ARRAY_I,
+    TAG_ARRAY_M,
+    TAG_ARRAY_T,
+    TAG_ARRAY_S,
+    TAG_BITS64_U,
+    TAG_BITS64_S,
+    TAG_BITS64_F,
+    TAG_MUTBOX,
+    TAG_CLOSURE,
+    TAG_SOME,
+    TAG_VARIANT,
+    TAG_BLOB_B,
+    TAG_BLOB_T,
+    TAG_BLOB_P,
+    TAG_BLOB_A,
+    TAG_FWD_PTR,
+    TAG_BITS32_U,
+    TAG_BITS32_S,
+    TAG_BITS32_F,
+    TAG_BIGINT,
+    TAG_CONCAT,
+    TAG_NULL,
+];
+
+fn test_<M: Memory>(mem: &mut M, n_objs: u32) -> TestCaseResult {
+    let objs: Vec<u32> = (0..n_objs).collect();
+
+    unsafe {
+        alloc_mark_stack(mem);
+
+        for obj in &objs {
+            push_mark_stack(mem, *obj as usize, TAGS[(*obj as usize) % TAGS.len()]);
+        }
+
+        for obj in objs.iter().copied().rev() {
+            let popped = pop_mark_stack();
+            if popped != Some((obj as usize, TAGS[(obj as usize) % TAGS.len()])) {
+                free_mark_stack();
+                return Err(TestCaseError::Fail(
+                    format!(
+                        "Unexpected object popped, expected={:?}, popped={:?}",
+                        obj, popped
+                    )
+                    .into(),
+                ));
+            }
+        }
+
+        free_mark_stack();
+    }
+
+    Ok(())
+}
+
+unsafe fn test_grow_stack() {
+    println!("    Testing grow_stack");
+
+    // Allow doubling twice
+    let mut mem = TestMemory::new(
+        size_of::<Blob>() + INIT_STACK_SIZE + INIT_STACK_SIZE + INIT_STACK_SIZE * 2,
+    );
+
+    alloc_mark_stack(&mut mem);
+
+    let mut current_size = INIT_STACK_SIZE.as_usize();
+    assert_eq!(STACK_BASE.add(current_size), STACK_TOP);
+    assert_eq!(STACK_BASE, STACK_PTR);
+
+    grow_stack(&mut mem);
+    current_size *= 2;
+    assert_eq!(STACK_BASE.add(current_size), STACK_TOP);
+    assert_eq!(STACK_BASE, STACK_PTR);
+
+    grow_stack(&mut mem);
+    current_size *= 2;
+    assert_eq!(STACK_BASE.add(current_size), STACK_TOP);
+    assert_eq!(STACK_BASE, STACK_PTR);
+}
diff --git a/rts/motoko-rts-tests/src/gc/enhanced.rs b/rts/motoko-rts-tests/src/gc/enhanced.rs
new file mode 100644
index 00000000000..645c95cef71
--- /dev/null
+++ b/rts/motoko-rts-tests/src/gc/enhanced.rs
@@ -0,0 +1,321 @@
+use super::heap::MotokoHeap;
+use super::utils::{
+    get_scalar_value, make_pointer, read_word, unskew_pointer, ObjectIdx, GC, WORD_SIZE,
+};
+use crate::gc::{compute_reachable_objects, CheckMode};
+use fxhash::{FxHashMap, FxHashSet};
+use motoko_rts::types::*;
+use std::fmt::Write;
+
+impl GC {
+    pub fn run(&self, heap: &mut MotokoHeap, _round: usize) -> bool {
+        let static_root = heap.static_root_array_variable_address() as *mut Value;
+        let continuation_table_location = heap.continuation_table_variable_address() as *mut Value;
+        let region0_pointer_location = heap.region0_pointer_variable_address() as *mut Value;
+        let unused_root = &mut Value::from_scalar(0) as *mut Value;
+
+        unsafe {
+            use motoko_rts::gc::incremental::{get_incremental_gc_state, IncrementalGC};
+            const INCREMENTS_UNTIL_COMPLETION: usize = 16;
+            for _ in 0..INCREMENTS_UNTIL_COMPLETION {
+                let roots = [
+                    static_root,
+                    continuation_table_location,
+                    region0_pointer_location,
+                    unused_root,
+                    unused_root,
+                    unused_root,
+                ];
+                IncrementalGC::instance(heap, get_incremental_gc_state())
+                    .empty_call_stack_increment(roots);
+            }
+            false
+        }
+    }
+}
+
+/// Check the dynamic heap:
+///
+/// - All (and in post-gc mode, only) reachable objects should be in the heap. Reachable objects
+///   are those in the transitive closure of roots.
+///
+/// - Objects should point to right objects. E.g. if object with index X points to objects with
+///   indices Y and Z in the `objects` map, it should point to objects with indices Y and Z on the
+///   heap.
+///
+pub fn check_dynamic_heap(
+    mode: CheckMode,
+    objects: &[(ObjectIdx, Vec<ObjectIdx>)],
+    roots: &[ObjectIdx],
+    continuation_table: &[ObjectIdx],
+    heap: &[u8],
+    heap_base_offset: usize,
+    heap_ptr_offset: usize,
+    static_root_array_variable_offset: usize,
+    continuation_table_variable_offset: usize,
+    region0_ptr_offset: usize,
+) {
+    let objects_map: FxHashMap<ObjectIdx, &[ObjectIdx]> = objects
+        .iter()
+        .map(|(obj, refs)| (*obj, refs.as_slice()))
+        .collect();
+
+    // Current offset in the heap
+    let mut offset = heap_base_offset;
+
+    // Maps objects to their addresses (not offsets!). Used when debugging duplicate objects.
+    let mut seen: FxHashMap<ObjectIdx, usize> = Default::default();
+
+    let static_root_array_address =
+        unskew_pointer(read_word(heap, static_root_array_variable_offset));
+    let static_root_array_offset = static_root_array_address as usize - heap.as_ptr() as usize;
+
+    let continuation_table_address =
+        unskew_pointer(read_word(heap, continuation_table_variable_offset));
+    let continuation_table_offset = continuation_table_address as usize - heap.as_ptr() as usize;
+
+    let region0_addr = unskew_pointer(read_word(heap, region0_ptr_offset));
+
+    while offset < heap_ptr_offset {
+        let object_offset = offset;
+
+        // Address of the current object. Used for debugging.
+        let address = offset + heap.as_ptr() as usize;
+
+        if object_offset == static_root_array_offset {
+            check_static_root_array(object_offset, roots, heap);
+            offset += (size_of::<Array>() + Words(roots.len()))
+                .to_bytes()
+                .as_usize();
+            continue;
+        }
+
+        if object_offset == continuation_table_offset {
+            check_continuation_table(object_offset, continuation_table, heap);
+            offset += (size_of::<Array>() + Words(continuation_table.len()))
+                .to_bytes()
+                .as_usize();
+            continue;
+        }
+
+        let tag = read_word(heap, offset);
+        offset += WORD_SIZE;
+
+        if tag == TAG_ONE_WORD_FILLER {
+        } else if tag == TAG_FREE_SPACE {
+            let words = read_word(heap, offset);
+            offset += WORD_SIZE;
+            offset += words * WORD_SIZE;
+        } else {
+            let forward;
+            forward = read_word(heap, offset);
+            offset += WORD_SIZE;
+
+            let is_forwarded = forward != make_pointer(address);
+
+            if tag == TAG_MUTBOX {
+                // MutBoxes of static root array, will be scanned indirectly when checking the static root array.
+                offset += WORD_SIZE;
+            } else if tag == TAG_BLOB_B {
+                assert!(!is_forwarded);
+                // in-heap mark stack blobs
+                let length = read_word(heap, offset);
+                offset += WORD_SIZE + length;
+            } else if tag == TAG_REGION {
+                if !is_forwarded {
+                    assert_eq!(address, region0_addr);
+                }
+                offset += (size_of::<Region>() - size_of::<Obj>())
+                    .to_bytes()
+                    .as_usize();
+            } else if mode == CheckMode::Stabilzation && tag == TAG_MUTBOX {
+                offset += WORD_SIZE;
+            } else {
+                assert!(is_array_or_slice_tag(tag));
+
+                if is_forwarded {
+                    let forward_offset = forward - heap.as_ptr() as usize;
+                    let length = read_word(
+                        heap,
+                        forward_offset + size_of::<Obj>().to_bytes().as_usize(),
+                    );
+
+                    // Skip stale object version that has been relocated during incremental GC.
+                    offset += length * WORD_SIZE;
+                } else {
+                    let n_fields = read_word(heap, offset);
+                    offset += WORD_SIZE;
+
+                    // There should be at least one field for the index
+                    assert!(n_fields >= 1);
+
+                    let object_idx = get_scalar_value(read_word(heap, offset));
+                    offset += WORD_SIZE;
+
+                    let old = seen.insert(object_idx, address);
+                    if let Some(old) = old {
+                        panic!(
+                            "Object with index {} seen multiple times: {:#x}, {:#x}",
+                            object_idx, old, address
+                        );
+                    }
+
+                    let object_expected_pointees =
+                        objects_map.get(&object_idx).unwrap_or_else(|| {
+                            panic!("Object with index {} is not in the objects map", object_idx)
+                        });
+
+                    for field_idx in 1..n_fields {
+                        let field = read_word(heap, offset);
+
+                        offset += WORD_SIZE;
+
+                        // Get index of the object pointed by the field
+                        let pointee_address = field.wrapping_add(1); // unskew
+
+                        let pointee_idx = read_object_id(pointee_address, heap);
+                        let expected_pointee_idx =
+                            object_expected_pointees[(field_idx - 1) as usize];
+                        assert_eq!(
+                            pointee_idx,
+                            expected_pointee_idx,
+                            "Object with index {} points to {} in field {}, but expected to point to {}",
+                            object_idx,
+                            pointee_idx,
+                            field_idx - 1,
+                            expected_pointee_idx,
+                        );
+                    }
+                }
+            }
+        }
+
+        skip_empty_partition_space(heap, &mut offset, heap_ptr_offset);
+    }
+
+    // At this point we've checked that all seen objects point to the expected objects (as
+    // specified by `objects`). Check that we've seen the reachable objects and only the reachable
+    // objects.
+    let reachable_objects = compute_reachable_objects(roots, continuation_table, &objects_map);
+
+    // Objects we've seen in the heap
+    let seen_objects: FxHashSet<ObjectIdx> = seen.keys().copied().collect();
+
+    // Reachable objects that we haven't seen in the heap
+    let missing_objects: Vec<ObjectIdx> = reachable_objects
+        .difference(&seen_objects)
+        .copied()
+        .collect();
+
+    let mut error_message = String::new();
+
+    if !missing_objects.is_empty() {
+        write!(
+            &mut error_message,
+            "{mode:?}: Reachable objects missing in the heap: {missing_objects:?}",
+        )
+        .unwrap();
+    }
+
+    if mode == CheckMode::AllReclaimed {
+        // Unreachable objects that we've seen in the heap
+        let extra_objects: Vec<ObjectIdx> = seen_objects
+            .difference(&reachable_objects)
+            .copied()
+            .collect();
+
+        if !extra_objects.is_empty() {
+            if !error_message.is_empty() {
+                error_message.push('\n');
+            }
+
+            write!(
+                &mut error_message,
+                "Unreachable objects seen in the post-GC heap: {:?}",
+                extra_objects,
+            )
+            .unwrap();
+        }
+    }
+
+    if !error_message.is_empty() {
+        panic!("{}", error_message);
+    }
+}
+
+fn check_static_root_array(mut offset: usize, roots: &[ObjectIdx], heap: &[u8]) {
+    let array_address = heap.as_ptr() as usize + offset;
+    assert_eq!(read_word(heap, offset), TAG_ARRAY_M);
+    offset += WORD_SIZE;
+
+    assert_eq!(read_word(heap, offset), make_pointer(array_address));
+    offset += WORD_SIZE;
+
+    assert_eq!(read_word(heap, offset), roots.len());
+    offset += WORD_SIZE;
+
+    for obj in roots.iter() {
+        let mutbox_address = unskew_pointer(read_word(heap, offset));
+        offset += WORD_SIZE;
+
+        let object_address = unskew_pointer(read_mutbox_field(mutbox_address, heap));
+        let idx = read_object_id(object_address, heap);
+        assert_eq!(idx, *obj);
+    }
+}
+
+fn read_mutbox_field(mutbox_address: usize, heap: &[u8]) -> usize {
+    let mut mutbox_offset = mutbox_address - heap.as_ptr() as usize;
+
+    let mutbox_tag = read_word(heap, mutbox_offset);
+    assert_eq!(mutbox_tag, TAG_MUTBOX);
+    mutbox_offset += WORD_SIZE;
+
+    assert_eq!(read_word(heap, mutbox_offset), make_pointer(mutbox_address));
+    mutbox_offset += WORD_SIZE;
+
+    read_word(heap, mutbox_offset)
+}
+
+fn check_continuation_table(mut offset: usize, continuation_table: &[ObjectIdx], heap: &[u8]) {
+    let table_addr = heap.as_ptr() as usize + offset;
+    assert_eq!(read_word(heap, offset), TAG_ARRAY_M);
+    offset += WORD_SIZE;
+
+    assert_eq!(read_word(heap, offset), make_pointer(table_addr));
+    offset += WORD_SIZE;
+
+    assert_eq!(read_word(heap, offset), continuation_table.len());
+    offset += WORD_SIZE;
+
+    for obj in continuation_table.iter() {
+        let ptr = unskew_pointer(read_word(heap, offset));
+        offset += WORD_SIZE;
+
+        let idx = read_object_id(ptr, heap);
+        assert_eq!(idx, *obj);
+    }
+}
+
+fn read_object_id(object_address: usize, heap: &[u8]) -> ObjectIdx {
+    let tag = read_word(heap, object_address - heap.as_ptr() as usize);
+    assert!(is_array_or_slice_tag(tag));
+
+    // Skip object header for idx
+    let idx_address = object_address + size_of::<Array>().to_bytes().as_usize();
+    get_scalar_value(read_word(heap, idx_address - heap.as_ptr() as usize))
+}
+
+fn skip_empty_partition_space(heap: &[u8], offset: &mut usize, heap_ptr_offset: usize) {
+    use motoko_rts::gc::incremental::{get_partitioned_heap, partitioned_heap::PARTITION_SIZE};
+    let heap_start = heap.as_ptr() as usize;
+    while *offset < heap_ptr_offset {
+        let address = *offset + heap_start;
+        let partition_index = address / PARTITION_SIZE;
+        let partition = unsafe { get_partitioned_heap().get_partition(partition_index) };
+        if address < partition.dynamic_space_end() {
+            return;
+        }
+        *offset = (partition_index + 1) * PARTITION_SIZE - heap_start;
+    }
+}
diff --git a/rts/motoko-rts-tests/src/gc/generational.rs b/rts/motoko-rts-tests/src/gc/generational.rs
new file mode 100644
index 00000000000..5a86ed7cf10
--- /dev/null
+++ b/rts/motoko-rts-tests/src/gc/generational.rs
@@ -0,0 +1,10 @@
+mod mark_stack;
+mod remembered_set;
+
+pub fn test() {
+    println!("Testing generational GC ...");
+    unsafe {
+        mark_stack::test();
+        remembered_set::test();
+    }
+}
diff --git a/rts/motoko-rts-tests/src/gc/generational/mark_stack.rs b/rts/motoko-rts-tests/src/gc/generational/mark_stack.rs
new file mode 100644
index 00000000000..959b913e87d
--- /dev/null
+++ b/rts/motoko-rts-tests/src/gc/generational/mark_stack.rs
@@ -0,0 +1,89 @@
+use crate::memory::TestMemory;
+
+use motoko_rts::gc::generational::mark_stack::{
+    alloc_mark_stack, free_mark_stack, grow_stack, pop_mark_stack, push_mark_stack,
+    INIT_STACK_SIZE, STACK_BASE, STACK_PTR, STACK_TOP,
+};
+use motoko_rts::memory::Memory;
+use motoko_rts::types::*;
+
+use proptest::test_runner::{Config, TestCaseError, TestCaseResult, TestRunner};
+
+pub unsafe fn test() {
+    println!("  Testing mark stack ...");
+
+    test_push_pop();
+    test_grow_stack();
+}
+
+fn test_push_pop() {
+    println!("    Testing push/pop");
+
+    let mut proptest_runner = TestRunner::new(Config {
+        cases: 100,
+        failure_persistence: None,
+        ..Default::default()
+    });
+
+    proptest_runner
+        .run(&(0u32..1000u32), |n_objs| {
+            let mut mem = TestMemory::new(Words(1024 * 1024));
+            test_(&mut mem, n_objs)
+        })
+        .unwrap();
+}
+
+fn test_<M: Memory>(mem: &mut M, n_objs: u32) -> TestCaseResult {
+    let objs: Vec<u32> = (0..n_objs).collect();
+
+    unsafe {
+        alloc_mark_stack(mem);
+
+        for obj in &objs {
+            push_mark_stack(mem, *obj as usize);
+        }
+
+        for obj in objs.iter().copied().rev() {
+            let popped = pop_mark_stack();
+            if popped != Some(obj as usize) {
+                free_mark_stack();
+                return Err(TestCaseError::Fail(
+                    format!(
+                        "Unexpected object popped, expected={:?}, popped={:?}",
+                        obj, popped
+                    )
+                    .into(),
+                ));
+            }
+        }
+
+        free_mark_stack();
+    }
+
+    Ok(())
+}
+
+unsafe fn test_grow_stack() {
+    println!("    Testing grow_stack");
+
+    // Allow doubling twice
+    let mut mem = TestMemory::new(
+        size_of::<Blob>() + INIT_STACK_SIZE + INIT_STACK_SIZE + INIT_STACK_SIZE * 2,
+    );
+
+    alloc_mark_stack(&mut mem);
+
+    let mut current_size = INIT_STACK_SIZE.as_usize();
+    assert_eq!(STACK_BASE.add(current_size), STACK_TOP);
+    assert_eq!(STACK_BASE, STACK_PTR);
+
+    grow_stack(&mut mem);
+    current_size *= 2;
+    assert_eq!(STACK_BASE.add(current_size), STACK_TOP);
+    assert_eq!(STACK_BASE, STACK_PTR);
+
+    grow_stack(&mut mem);
+    current_size *= 2;
+    assert_eq!(STACK_BASE.add(current_size), STACK_TOP);
+    assert_eq!(STACK_BASE, STACK_PTR);
+}
diff --git a/rts/motoko-rts-tests/src/gc/generational/remembered_set.rs b/rts/motoko-rts-tests/src/gc/generational/remembered_set.rs
new file mode 100644
index 00000000000..691185f9a1c
--- /dev/null
+++ b/rts/motoko-rts-tests/src/gc/generational/remembered_set.rs
@@ -0,0 +1,96 @@
+use std::collections::HashSet;
+
+use crate::memory::TestMemory;
+use motoko_rts::gc::remembered_set::{
+    RememberedSet, INITIAL_TABLE_LENGTH, OCCUPATION_THRESHOLD_PERCENT,
+};
+use motoko_rts::types::{Value, Words};
+
+const GROW_LIMIT: usize = INITIAL_TABLE_LENGTH * OCCUPATION_THRESHOLD_PERCENT / 100;
+
+pub unsafe fn test() {
+    println!("  Testing remembered set ...");
+
+    test_remembered_set(0);
+    test_remembered_set(1);
+    test_remembered_set(INITIAL_TABLE_LENGTH / 2);
+    test_remembered_set(GROW_LIMIT - 1);
+    test_remembered_set(GROW_LIMIT);
+    test_remembered_set(GROW_LIMIT + 1);
+    test_remembered_set(INITIAL_TABLE_LENGTH);
+    test_remembered_set(2 * GROW_LIMIT - 1);
+    test_remembered_set(2 * GROW_LIMIT);
+    test_remembered_set(2 * GROW_LIMIT + 1);
+    test_remembered_set(128 * GROW_LIMIT);
+}
+
+unsafe fn test_remembered_set(amount: usize) {
+    test_insert_iterate(amount);
+    test_duplicates(amount);
+    test_collisions(amount);
+}
+
+unsafe fn test_insert_iterate(amount: usize) {
+    let mut mem = TestMemory::new(Words(2 * amount + 1024 * 1024));
+
+    let mut remembered_set = RememberedSet::new(&mut mem);
+    let mut test_set: HashSet<usize> = HashSet::new();
+    // start at 1 since 0 is the null ptr and not stored in the remembered set
+    for value in 1..amount + 1 {
+        remembered_set.insert(&mut mem, Value::from_raw(value));
+        test_set.insert(value);
+    }
+
+    let mut iterator = remembered_set.iterate();
+    for _ in 1..amount + 1 {
+        assert!(iterator.has_next());
+        let value = iterator.current().get_raw();
+        assert!(test_set.contains(&value));
+        iterator.next();
+    }
+    assert!(!iterator.has_next());
+}
+
+unsafe fn test_duplicates(amount: usize) {
+    let mut mem = TestMemory::new(Words(2 * amount + 1024 * 1024));
+
+    let mut remembered_set = RememberedSet::new(&mut mem);
+    // start at 1 since 0 is the null ptr and not stored in the remembered set
+    for value in 1..amount + 1 {
+        remembered_set.insert(&mut mem, Value::from_raw(value));
+    }
+
+    let count = remembered_set.count();
+    for value in 1..amount + 1 {
+        remembered_set.insert(&mut mem, Value::from_raw(value));
+        assert_eq!(remembered_set.count(), count);
+    }
+}
+
+unsafe fn test_collisions(amount: usize) {
+    let mut mem = TestMemory::new(Words(2 * amount + 1024 * 1024));
+
+    let mut remembered_set = RememberedSet::new(&mut mem);
+    let mut test_set: HashSet<usize> = HashSet::new();
+
+    // start at 1 since 0 is the null ptr and not stored in the remembered set
+    for index in 1..amount + 1 {
+        const FACTOR: usize = 1024 * 1024;
+        let value = if index <= usize::MAX / FACTOR {
+            index * FACTOR
+        } else {
+            index
+        };
+        remembered_set.insert(&mut mem, Value::from_raw(value));
+        test_set.insert(value);
+    }
+
+    let mut iterator = remembered_set.iterate();
+    for _ in 1..amount + 1 {
+        assert!(iterator.has_next());
+        let value = iterator.current().get_raw();
+        assert!(test_set.contains(&value));
+        iterator.next();
+    }
+    assert!(!iterator.has_next());
+}
diff --git a/rts/motoko-rts-tests/src/gc/heap.rs b/rts/motoko-rts-tests/src/gc/heap.rs
index 1b1018fac59..045deda114e 100644
--- a/rts/motoko-rts-tests/src/gc/heap.rs
+++ b/rts/motoko-rts-tests/src/gc/heap.rs
@@ -1,16 +1,20 @@
-use super::utils::{make_pointer, make_scalar, write_word, ObjectIdx, WORD_SIZE};
+use motoko_rts_macros::{
+    classical_persistence, enhanced_orthogonal_persistence, incremental_gc, non_incremental_gc,
+};
+
+#[classical_persistence]
+mod classical;
+#[enhanced_orthogonal_persistence]
+mod enhanced;
+
+use super::utils::{ObjectIdx, GC};
 
 use motoko_rts::memory::Memory;
 use motoko_rts::types::*;
 
 use std::cell::{Ref, RefCell};
-use std::convert::TryFrom;
 use std::rc::Rc;
 
-use fxhash::{FxHashMap, FxHashSet};
-
-use motoko_rts::gc::incremental::partitioned_heap::PARTITION_SIZE;
-
 /// Represents Motoko heaps. Reference counted (implements `Clone`) so we can clone and move values
 /// of this type to GC callbacks.
 #[derive(Clone)]
@@ -19,12 +23,12 @@ pub struct MotokoHeap {
 }
 
 impl Memory for MotokoHeap {
-    unsafe fn alloc_words(&mut self, n: Words<u32>) -> Value {
+    unsafe fn alloc_words(&mut self, n: Words<usize>) -> Value {
         self.inner.borrow_mut().alloc_words(n)
     }
 
-    unsafe fn grow_memory(&mut self, ptr: u64) {
-        self.inner.borrow_mut().grow_memory(ptr as usize);
+    unsafe fn grow_memory(&mut self, ptr: usize) {
+        self.inner.borrow_mut().grow_memory(ptr);
     }
 }
 
@@ -39,12 +43,16 @@ impl MotokoHeap {
         map: &[(ObjectIdx, Vec<ObjectIdx>)],
         roots: &[ObjectIdx],
         continuation_table: &[ObjectIdx],
+        gc: GC,
+        free_space: usize,
     ) -> MotokoHeap {
         MotokoHeap {
             inner: Rc::new(RefCell::new(MotokoHeapInner::new(
                 map,
                 roots,
                 continuation_table,
+                gc,
+                free_space,
             ))),
         }
     }
@@ -65,6 +73,25 @@ impl MotokoHeap {
         self.inner.borrow().heap_ptr_address()
     }
 
+    /// Update the heap pointer given as an address in the current process.
+    #[non_incremental_gc]
+    pub fn set_heap_ptr_address(&self, address: usize) {
+        self.inner.borrow_mut().set_heap_ptr_address(address)
+    }
+
+    /// Get the last heap pointer, as address in the current process. The address can be used to mutate
+    /// the heap.
+    #[non_incremental_gc]
+    pub fn last_ptr_address(&self) -> usize {
+        self.inner.borrow().last_ptr_address()
+    }
+
+    /// Update the last heap pointer given as an address in the current process.
+    #[non_incremental_gc]
+    pub fn set_last_ptr_address(&self, address: usize) {
+        self.inner.borrow_mut().set_last_ptr_address(address)
+    }
+
     /// Get the beginning of dynamic heap, as an address in the current process
     pub fn heap_base_address(&self) -> usize {
         self.inner.borrow().heap_base_address()
@@ -96,6 +123,7 @@ impl MotokoHeap {
     }
 
     /// Get the address of the variable pointing to region0
+    #[incremental_gc]
     pub fn region0_pointer_variable_address(&self) -> usize {
         self.inner.borrow().region0_pointer_address()
     }
@@ -110,8 +138,8 @@ impl MotokoHeap {
     pub fn dump(&self) {
         unsafe {
             motoko_rts::debug::dump_heap(
-                self.heap_base_address() as u32,
-                self.heap_ptr_address() as u32,
+                self.heap_base_address(),
+                self.heap_ptr_address(),
                 self.static_root_array_variable_address() as *mut Value,
                 self.continuation_table_variable_address() as *mut Value,
             );
@@ -175,6 +203,18 @@ impl MotokoHeapInner {
         self.heap_ptr_offset = self.address_to_offset(address);
     }
 
+    /// Get last heap pointer (i.e. where the dynamic heap ends last GC run) in the process's address space
+    #[non_incremental_gc]
+    fn last_ptr_address(&self) -> usize {
+        self.offset_to_address(self._heap_ptr_last)
+    }
+
+    /// Set last heap pointer
+    #[non_incremental_gc]
+    fn set_last_ptr_address(&mut self, address: usize) {
+        self._heap_ptr_last = self.address_to_offset(address);
+    }
+
     /// Get the address of the variable pointing to the static root array.
     fn static_root_array_variable_address(&self) -> usize {
         self.offset_to_address(self.static_root_array_variable_offset)
@@ -186,99 +226,40 @@ impl MotokoHeapInner {
     }
 
     /// Get the address of the region0 pointer
+    #[incremental_gc]
     fn region0_pointer_address(&self) -> usize {
         self.offset_to_address(self.region0_pointer_variable_offset)
     }
 
-    fn new(
+    #[classical_persistence]
+    pub fn new(
         map: &[(ObjectIdx, Vec<ObjectIdx>)],
         roots: &[ObjectIdx],
         continuation_table: &[ObjectIdx],
+        gc: GC,
+        _free_space: usize,
     ) -> MotokoHeapInner {
-        // Check test correctness: an object should appear at most once in `map`
-        {
-            let heap_objects: FxHashSet<ObjectIdx> = map.iter().map(|(obj, _)| *obj).collect();
-            assert_eq!(
-                heap_objects.len(),
-                map.len(),
-                "Invalid test heap: some objects appear multiple times"
-            );
-        }
-
-        // Three pointers: Static root array, continuation table, and region 0.
-        let root_pointers_size_bytes = 3 * WORD_SIZE;
-
-        // Each object will have array header plus one word for id per object + one word for each reference.
-        // The static root is an array (header + length) with one element, one MutBox for each static variable.
-        let static_root_set_size_bytes = (size_of::<Array>().as_usize()
-            + roots.len()
-            + roots.len() * size_of::<MutBox>().as_usize())
-            * WORD_SIZE;
-
-        let dynamic_heap_size_without_roots = {
-            let object_headers_words = map.len() * (size_of::<Array>().as_usize() + 1);
-            let references_words = map.iter().map(|(_, refs)| refs.len()).sum::<usize>();
-            (object_headers_words + references_words) * WORD_SIZE
-        };
-
-        let continuation_table_size = (size_of::<Array>() + Words(continuation_table.len() as u32))
-            .to_bytes()
-            .as_usize();
-
-        let region0_size = size_of::<Region>().to_bytes().as_usize();
-
-        let dynamic_heap_size_bytes = dynamic_heap_size_without_roots
-            + static_root_set_size_bytes
-            + continuation_table_size
-            + region0_size;
-
-        let total_heap_size_bytes = root_pointers_size_bytes + dynamic_heap_size_bytes;
-
-        let heap_size = heap_size_for_gc();
-
-        // The Worst-case unalignment w.r.t. 32-byte alignment is 28 (assuming
-        // that we have general word alignment). So we over-allocate 28 bytes.
-        let mut heap = vec![0u8; heap_size + 28];
-
-        // Align the dynamic heap starts at a 32-byte multiple.
-        let realign = (32 - (heap.as_ptr() as usize + root_pointers_size_bytes) % 32) % 32;
-        assert_eq!(realign % 4, 0);
+        self::classical::new_heap(map, roots, continuation_table, gc)
+    }
 
-        // Maps `ObjectIdx`s into their offsets in the heap.
-        let (static_root_array_address, continuation_table_address, region0_address) =
-            create_dynamic_heap(
-                map,
-                roots,
-                continuation_table,
-                &mut heap[root_pointers_size_bytes + realign..heap_size + realign],
-            );
+    #[enhanced_orthogonal_persistence]
+    pub fn new(
+        map: &[(ObjectIdx, Vec)],
+        roots: &[ObjectIdx],
+        continuation_table: &[ObjectIdx],
+        _gc: GC,
+        free_space: usize,
+    ) -> MotokoHeapInner {
+        self::enhanced::new_heap(map, roots, continuation_table, free_space)
+    }
 
-        // Root pointers in static memory space.
-        let static_root_array_variable_offset = root_pointers_size_bytes - 3 * WORD_SIZE;
-        let continuation_table_variable_offset = root_pointers_size_bytes - 2 * WORD_SIZE;
-        let region0_pointer_variable_offset = root_pointers_size_bytes - WORD_SIZE;
-        create_static_memory(
-            static_root_array_variable_offset,
-            continuation_table_variable_offset,
-            region0_pointer_variable_offset,
-            static_root_array_address,
-            continuation_table_address,
-            region0_address,
-            &mut heap[realign..root_pointers_size_bytes + realign],
-        );
-
-        MotokoHeapInner {
-            heap: heap.into_boxed_slice(),
-            heap_base_offset: root_pointers_size_bytes + realign,
-            _heap_ptr_last: root_pointers_size_bytes + realign,
-            heap_ptr_offset: total_heap_size_bytes + realign,
-            static_root_array_variable_offset: static_root_array_variable_offset + realign,
-            continuation_table_variable_offset: continuation_table_variable_offset + realign,
-            region0_pointer_variable_offset: region0_pointer_variable_offset + realign,
-        }
+    #[non_incremental_gc]
+    unsafe fn alloc_words(&mut self, n: Words<usize>) -> Value {
+        self.linear_alloc_words(n)
     }
 
-    unsafe fn alloc_words(&mut self, n: Words<u32>) -> Value {
+    #[incremental_gc]
+    unsafe fn alloc_words(&mut self, n: Words<usize>) -> Value {
         let mut dummy_memory = DummyMemory {};
         let result =
             motoko_rts::gc::incremental::get_partitioned_heap().allocate(&mut dummy_memory, n);
@@ -287,7 +268,7 @@ impl MotokoHeapInner {
         self.linear_alloc_words(n)
     }
 
-    unsafe fn linear_alloc_words(&mut self, n: Words<u32>) -> Value {
+    unsafe fn linear_alloc_words(&mut self, n: Words<usize>) -> Value {
         // Update heap pointer
         let old_hp = self.heap_ptr_address();
         let new_hp = old_hp + n.to_bytes().as_usize();
@@ -313,206 +294,54 @@ impl MotokoHeapInner {
 struct DummyMemory {}
 
 impl Memory for DummyMemory {
-    unsafe fn alloc_words(&mut self, _n: Words) -> Value {
+    unsafe fn alloc_words(&mut self, _n: Words) -> Value {
         unreachable!()
     }
 
-    unsafe fn grow_memory(&mut self, _ptr: u64) {}
+    unsafe fn grow_memory(&mut self, _ptr: usize) {}
 }
 
 /// Compute the size of the heap to be allocated for the GC test.
-fn heap_size_for_gc() -> usize {
-    3 * PARTITION_SIZE
-}
-
-/// Given a heap description (as a map from objects to objects), and the dynamic part of the heap
-/// (as an array), initialize the dynamic heap with objects.
-///
-/// Returns a pair containing the address of the static root array and the address of the continuation table.
-fn create_dynamic_heap(
-    refs: &[(ObjectIdx, Vec)],
-    static_roots: &[ObjectIdx],
-    continuation_table: &[ObjectIdx],
-    dynamic_heap: &mut [u8],
-) -> (u32, u32, u32) {
-    let heap_start = dynamic_heap.as_ptr() as usize;
-
-    // Maps objects to their addresses
-    let mut object_addrs: FxHashMap = Default::default();
-
-    // First pass allocates objects without fields
-    {
-        let mut heap_offset = 0;
-        for (obj, refs) in refs {
-            object_addrs.insert(*obj, heap_start + heap_offset);
-
-            // Store object header
-            let address = u32::try_from(heap_start + heap_offset).unwrap();
-            write_word(dynamic_heap, heap_offset, TAG_ARRAY_M);
-            heap_offset += WORD_SIZE;
-
-            write_word(dynamic_heap, heap_offset, make_pointer(address)); // forwarding pointer
-            heap_offset += WORD_SIZE;
-
-            // Store length: idx + refs
-            write_word(
-                dynamic_heap,
-                heap_offset,
-                u32::try_from(refs.len() + 1).unwrap(),
-            );
-            heap_offset += WORD_SIZE;
-
-            // Store object value (idx)
-            write_word(dynamic_heap, heap_offset, make_scalar(*obj));
-            heap_offset += WORD_SIZE;
-
-            // Leave space for the fields
-            heap_offset += refs.len() * WORD_SIZE;
+#[non_incremental_gc]
+fn heap_size_for_gc(gc: GC, total_heap_size_bytes: usize, n_objects: usize) -> usize {
+    use super::utils::WORD_SIZE;
+    match gc {
+        GC::Copying => 2 * total_heap_size_bytes,
+        GC::MarkCompact => {
+            let bitmap_size_bytes = {
+                let heap_bytes = Bytes(total_heap_size_bytes);
+                // `...to_words().to_bytes()` below effectively rounds up heap size to word size
+                // then gets the bytes
+                let heap_words = heap_bytes.to_words();
+                let mark_bit_bytes = heap_words.to_bytes();
+
+                // The bitmap implementation rounds up to 64-bits to be able to read as many
+                // bits as possible in one instruction and potentially skip 64 words in the
+                // heap with single 64-bit comparison
+                (((mark_bit_bytes.as_usize() + 7) / 8) * 8)
+                    + size_of::().to_bytes().as_usize()
+            };
+            // In the worst case the entire heap will be pushed to the mark stack, but in tests
+            // we limit the size
+            let mark_stack_words = n_objects.clamp(
+                motoko_rts::gc::mark_compact::mark_stack::INIT_STACK_SIZE.as_usize(),
+                super::utils::MAX_MARK_STACK_SIZE,
+            ) + size_of::().as_usize();
+
+            total_heap_size_bytes + bitmap_size_bytes as usize + (mark_stack_words * WORD_SIZE)
         }
-    }
-
-    // println!("object addresses={:#?}", object_addrs);
-
-    // Second pass adds fields
-    for (obj, refs) in refs {
-        let obj_offset = object_addrs.get(obj).unwrap() - heap_start;
-        for (ref_idx, ref_) in refs.iter().enumerate() {
-            let ref_addr = make_pointer(*object_addrs.get(ref_).unwrap() as u32);
-            let field_offset = obj_offset
-                + (size_of::() + Words(1 + ref_idx as u32))
-                    .to_bytes()
-                    .as_usize();
-            write_word(dynamic_heap, field_offset, u32::try_from(ref_addr).unwrap());
+        GC::Generational => {
+            const ROUNDS: usize = 3;
+            const REMEMBERED_SET_MAXIMUM_SIZE: usize = 1024 * 1024 * WORD_SIZE;
+            let size = heap_size_for_gc(GC::MarkCompact, total_heap_size_bytes, n_objects);
+            size + ROUNDS * REMEMBERED_SET_MAXIMUM_SIZE
         }
     }
-
-    // Add the static root table
-    let n_objects = refs.len();
-    // fields+1 for the scalar field (idx)
-    let n_fields: usize = refs.iter().map(|(_, fields)| fields.len() + 1).sum();
-    let root_section_offset = (size_of::() * n_objects as u32)
-        .to_bytes()
-        .as_usize()
-        + n_fields * WORD_SIZE;
-
-    let mut heap_offset = root_section_offset;
-    let mut root_mutboxes = vec![];
-    {
-        for root_id in static_roots {
-            let mutbox_address = u32::try_from(heap_start + heap_offset).unwrap();
-            root_mutboxes.push(mutbox_address);
-            write_word(dynamic_heap, heap_offset, TAG_MUTBOX);
-            heap_offset += WORD_SIZE;
-
-            write_word(dynamic_heap, heap_offset, make_pointer(mutbox_address));
-            heap_offset += WORD_SIZE;
-
-            let root_ptr = *object_addrs.get(root_id).unwrap();
-            write_word(dynamic_heap, heap_offset, make_pointer(root_ptr as u32));
-            heap_offset += WORD_SIZE;
-        }
-    }
-    let static_root_array_address = u32::try_from(heap_start + heap_offset).unwrap();
-    {
-        write_word(dynamic_heap, heap_offset, TAG_ARRAY_M);
-        heap_offset += WORD_SIZE;
-
-        write_word(
-            dynamic_heap,
-            heap_offset,
-            make_pointer(static_root_array_address),
-        );
-        heap_offset += WORD_SIZE;
-
-        assert_eq!(static_roots.len(), root_mutboxes.len());
-        write_word(dynamic_heap, heap_offset, root_mutboxes.len() as u32);
-        heap_offset += WORD_SIZE;
-
-        for mutbox_address in root_mutboxes {
-            write_word(dynamic_heap, heap_offset, make_pointer(mutbox_address));
-            heap_offset += WORD_SIZE;
-        }
-    }
-
-    let continuation_table_address = u32::try_from(heap_start + heap_offset).unwrap();
-    {
-        write_word(dynamic_heap, heap_offset, TAG_ARRAY_M);
-        heap_offset += WORD_SIZE;
-
-        write_word(
-            dynamic_heap,
-            heap_offset,
-            make_pointer(continuation_table_address),
-        );
-        heap_offset += WORD_SIZE;
-
-        write_word(dynamic_heap, heap_offset, continuation_table.len() as u32);
-        heap_offset += WORD_SIZE;
-
-        for idx in continuation_table {
-            let idx_ptr = *object_addrs.get(idx).unwrap();
-            write_word(dynamic_heap, heap_offset, make_pointer(idx_ptr as u32));
-            heap_offset += WORD_SIZE;
-        }
-    }
-
-    // Add region0
-    let region0_address = u32::try_from(heap_start + heap_offset).unwrap();
-    {
-        write_word(dynamic_heap, heap_offset, TAG_REGION);
-        heap_offset += WORD_SIZE;
-
-        write_word(dynamic_heap, heap_offset, make_pointer(region0_address));
-        heap_offset += WORD_SIZE;
-
-        // lower part of region id
-        write_word(dynamic_heap, heap_offset, 0);
-        heap_offset += WORD_SIZE;
-        // upper part of region id
-        write_word(dynamic_heap, heap_offset, 0);
-        heap_offset += WORD_SIZE;
-        // zero pages
-        write_word(dynamic_heap, heap_offset, 0);
-        heap_offset += WORD_SIZE;
-        // Simplification: Skip the vector pages blob
-        write_word(dynamic_heap, heap_offset, make_scalar(0));
-    }
-
-    (
-        static_root_array_address,
-        continuation_table_address,
-        region0_address,
-    )
 }
 
-/// Static memory part containing the root pointers.
-fn create_static_memory(
-    static_root_array_variable_offset: usize,
-    continuation_table_variable_offset: usize,
-    region0_pointer_variable_offset: usize,
-    static_root_array_address: u32,
-    continuation_table_address: u32,
-    region0_address: u32,
-    heap: &mut [u8],
-) {
-    // Write static array pointer as the third last word in static memory
-    write_word(
-        heap,
-        static_root_array_variable_offset,
-        make_pointer(static_root_array_address),
-    );
-
-    // Write continuation table pointer as the second last word in static memory
-    write_word(
-        heap,
-        continuation_table_variable_offset,
-        make_pointer(continuation_table_address),
-    );
-
-    // Write region 0 pointer as the very last word in static memory
-    write_word(
-        heap,
-        region0_pointer_variable_offset,
-        make_pointer(region0_address),
-    );
+#[incremental_gc]
+fn heap_size_for_gc(gc: GC, _total_heap_size_bytes: usize, _n_objects: usize) -> usize {
+    match gc {
+        GC::Incremental => 3 * motoko_rts::gc::incremental::partitioned_heap::PARTITION_SIZE,
+    }
 }
diff --git a/rts/motoko-rts-tests/src/gc/heap/classical.rs b/rts/motoko-rts-tests/src/gc/heap/classical.rs
new file mode 100644
index 00000000000..e613f84708d
--- /dev/null
+++ b/rts/motoko-rts-tests/src/gc/heap/classical.rs
@@ -0,0 +1,294 @@
+use super::{heap_size_for_gc, MotokoHeapInner};
+
+use crate::gc::utils::{make_pointer, make_scalar, write_word, ObjectIdx, GC, WORD_SIZE};
+
+use motoko_rts::types::*;
+
+use fxhash::{FxHashMap, FxHashSet};
+
+pub(super) fn new_heap(
+    map: &[(ObjectIdx, Vec)],
+    roots: &[ObjectIdx],
+    continuation_table: &[ObjectIdx],
+    gc: GC,
+) -> MotokoHeapInner {
+    // Check test correctness: an object should appear at most once in `map`
+    {
+        let heap_objects: FxHashSet = map.iter().map(|(obj, _)| *obj).collect();
+        assert_eq!(
+            heap_objects.len(),
+            map.len(),
+            "Invalid test heap: some objects appear multiple times"
+        );
+    }
+
+    // Each object will have array header plus one word for id per object + one word for each reference. Static heap will
+    // have an array (header + length) with one element per root, one MutBox for each root. +2 for
+    // the continuation table pointer and the region0 pointer.
+    let static_heap_size_bytes = (size_of::().as_usize()
+        + roots.len()
+        + (roots.len() * size_of::().as_usize())
+        + 2)
+        * WORD_SIZE;
+
+    let dynamic_heap_size_without_roots = {
+        let object_headers_words = map.len() * (size_of::().as_usize() + 1);
+        let references_words = map.iter().map(|(_, refs)| refs.len()).sum::();
+        (object_headers_words + references_words) * WORD_SIZE
+    };
+
+    let continuation_table_size = (size_of::() + Words(continuation_table.len()))
+        .to_bytes()
+        .as_usize();
+
+    let region0_size = size_of::().to_bytes().as_usize();
+
+    let dynamic_heap_size_bytes =
+        dynamic_heap_size_without_roots + continuation_table_size + region0_size;
+
+    let total_heap_size_bytes = static_heap_size_bytes + dynamic_heap_size_bytes;
+
+    let heap_size = heap_size_for_gc(gc, total_heap_size_bytes, map.len());
+
+    // The worst-case unalignment w.r.t. 32-byte alignment is 28 (assuming
+    // that we have general word alignment). So we over-allocate 28 bytes.
+    let mut heap = vec![0u8; heap_size + 28];
+
+    // Align the dynamic heap start at a 32-byte multiple.
+    let realign = (32 - (heap.as_ptr() as usize + static_heap_size_bytes) % 32) % 32;
+    assert_eq!(realign % 4, 0);
+
+    // Maps `ObjectIdx`s into their offsets in the heap.
+    let object_addrs: FxHashMap = create_dynamic_heap(
+        map,
+        continuation_table,
+        &mut heap[static_heap_size_bytes + realign..heap_size + realign],
+    );
+
+    // Continuation table pointer is the second last word in the static heap.
+    let continuation_table_ptr_offset = static_heap_size_bytes - WORD_SIZE * 2;
+
+    // Region0 pointer is the very last word in the static heap.
+    let region0_ptr_location_offset = static_heap_size_bytes - WORD_SIZE;
+
+    create_static_heap(
+        roots,
+        &object_addrs,
+        continuation_table_ptr_offset,
+        static_heap_size_bytes + dynamic_heap_size_without_roots,
+        region0_ptr_location_offset,
+        static_heap_size_bytes + dynamic_heap_size_without_roots + continuation_table_size,
+        &mut heap[realign..static_heap_size_bytes + realign],
+    );
+
+    MotokoHeapInner {
+        heap: heap.into_boxed_slice(),
+        heap_base_offset: static_heap_size_bytes + realign,
+        _heap_ptr_last: static_heap_size_bytes + realign,
+        heap_ptr_offset: total_heap_size_bytes + realign,
+        static_root_array_variable_offset: realign,
+        continuation_table_variable_offset: continuation_table_ptr_offset + realign,
+        region0_pointer_variable_offset: region0_ptr_location_offset + realign,
+    }
+}
+
+/// Given a heap description (as a map from objects to objects), and the dynamic part of the heap
+/// (as an array), initialize the dynamic heap with objects.
+///
+/// Returns a mapping from object indices (`ObjectIdx`) to their addresses (see module
+/// documentation for "offset" and "address" definitions).
+fn create_dynamic_heap(
+    refs: &[(ObjectIdx, Vec)],
+    continuation_table: &[ObjectIdx],
+    dynamic_heap: &mut [u8],
+) -> FxHashMap {
+    let incremental = cfg!(feature = "incremental_gc");
+    let heap_start = dynamic_heap.as_ptr() as usize;
+
+    // Maps objects to their addresses
+    let mut object_addrs: FxHashMap = Default::default();
+
+    // First pass allocates objects without fields
+    {
+        let mut heap_offset = 0;
+        for (obj, refs) in refs {
+            object_addrs.insert(*obj, heap_start + heap_offset);
+
+            // Store object header
+            let address = heap_start + heap_offset;
+            write_word(dynamic_heap, heap_offset, TAG_ARRAY_M);
+            heap_offset += WORD_SIZE;
+
+            if incremental {
+                write_word(dynamic_heap, heap_offset, make_pointer(address)); // forwarding pointer
+                heap_offset += WORD_SIZE;
+            }
+
+            // Store length: idx + refs
+            write_word(dynamic_heap, heap_offset, refs.len() + 1);
+            heap_offset += WORD_SIZE;
+
+            // Store object value (idx)
+            write_word(dynamic_heap, heap_offset, make_scalar(*obj));
+            heap_offset += WORD_SIZE;
+
+            // Leave space for the fields
+            heap_offset += refs.len() * WORD_SIZE;
+        }
+    }
+
+    // println!("object addresses={:#?}", object_addrs);
+
+    // Second pass adds fields
+    for (obj, refs) in refs {
+        let obj_offset = object_addrs.get(obj).unwrap() - heap_start;
+        for (ref_idx, ref_) in refs.iter().enumerate() {
+            let ref_addr = make_pointer(*object_addrs.get(ref_).unwrap());
+            let field_offset = obj_offset
+                + (size_of::() + Words(1 + ref_idx))
+                    .to_bytes()
+                    .as_usize();
+            write_word(dynamic_heap, field_offset, ref_addr);
+        }
+    }
+
+    // Add the continuation table
+    let n_objects = refs.len();
+    // fields+1 for the scalar field (idx)
+    let n_fields: usize = refs.iter().map(|(_, fields)| fields.len() + 1).sum();
+    let continuation_table_offset =
+        (size_of::() * n_objects).to_bytes().as_usize() + n_fields * WORD_SIZE;
+    let continuation_table_size =
+        size_of::().to_bytes().as_usize() + continuation_table.len() * WORD_SIZE;
+
+    {
+        let mut heap_offset = continuation_table_offset;
+
+        let continuation_table_address = heap_start + heap_offset;
+        write_word(dynamic_heap, heap_offset, TAG_ARRAY_M);
+        heap_offset += WORD_SIZE;
+
+        if incremental {
+            write_word(
+                dynamic_heap,
+                heap_offset,
+                make_pointer(continuation_table_address),
+            );
+            heap_offset += WORD_SIZE;
+        }
+
+        write_word(dynamic_heap, heap_offset, continuation_table.len());
+        heap_offset += WORD_SIZE;
+
+        for idx in continuation_table {
+            let idx_ptr = *object_addrs.get(idx).unwrap();
+            write_word(dynamic_heap, heap_offset, make_pointer(idx_ptr));
+            heap_offset += WORD_SIZE;
+        }
+    }
+
+    // Add region0
+    let region0_offset = continuation_table_offset + continuation_table_size;
+    {
+        let mut heap_offset = region0_offset;
+        let region0_address = heap_start + heap_offset;
+        write_word(dynamic_heap, heap_offset, TAG_REGION);
+        heap_offset += WORD_SIZE;
+
+        if incremental {
+            write_word(dynamic_heap, heap_offset, make_pointer(region0_address));
+            heap_offset += WORD_SIZE;
+        }
+
+        // lower part of region id
+        write_word(dynamic_heap, heap_offset, 0);
+        heap_offset += WORD_SIZE;
+        // upper part of region id
+        write_word(dynamic_heap, heap_offset, 0);
+        heap_offset += WORD_SIZE;
+        // zero pages
+        write_word(dynamic_heap, heap_offset, 0);
+        heap_offset += WORD_SIZE;
+        // Simplification: Skip the vector pages blob
+        write_word(dynamic_heap, heap_offset, make_scalar(0));
+    }
+
+    object_addrs
+}
+
+/// Given a root set (`roots`, may contain duplicates), a mapping from object indices to addresses
+/// (`object_addrs`), and the static part of the heap, initialize the static heap with the static
+/// root array.
+fn create_static_heap(
+    roots: &[ObjectIdx],
+    object_addrs: &FxHashMap,
+    continuation_table_ptr_offset: usize,
+    continuation_table_offset: usize,
+    region0_ptr_offset: usize,
+    region0_offset: usize,
+    heap: &mut [u8],
+) {
+    let incremental = cfg!(feature = "incremental_gc");
+    let root_addresses: Vec = roots
+        .iter()
+        .map(|obj| *object_addrs.get(obj).unwrap())
+        .collect();
+
+    // Create static root array. Each element of the array is a MutBox pointing to the actual
+    // root.
+    let array_addr = heap.as_ptr() as usize;
+    let mut offset = 0;
+    write_word(heap, offset, TAG_ARRAY_M);
+    offset += WORD_SIZE;
+
+    if incremental {
+        write_word(heap, offset, make_pointer(array_addr));
+        offset += WORD_SIZE;
+    }
+
+    write_word(heap, offset, roots.len());
+    offset += WORD_SIZE;
+
+    // Current offset in the heap for the next static roots array element
+    let mut root_addr_offset = size_of::().to_bytes().as_usize();
+    assert_eq!(offset, root_addr_offset);
+
+    // Current offset in the heap for the MutBox of the next root
+    let mut mutbox_offset = (size_of::().as_usize() + roots.len()) * WORD_SIZE;
+
+    for root_address in root_addresses {
+        // Add a MutBox for the object
+        let mutbox_addr = heap.as_ptr() as usize + mutbox_offset;
+        let mutbox_ptr = make_pointer(mutbox_addr);
+
+        offset = mutbox_offset;
+        write_word(heap, offset, TAG_MUTBOX);
+        offset += WORD_SIZE;
+
+        if incremental {
+            write_word(heap, offset, mutbox_ptr);
+            offset += WORD_SIZE;
+        }
+
+        write_word(heap, offset, make_pointer(root_address));
+        offset += WORD_SIZE;
+
+        write_word(heap, root_addr_offset, mutbox_ptr);
+
+        root_addr_offset += WORD_SIZE;
+        mutbox_offset += size_of::().to_bytes().as_usize();
+        assert_eq!(offset, mutbox_offset);
+    }
+
+    // Write continuation table pointer as the second last word in static heap
+    let continuation_table_ptr = continuation_table_offset + heap.as_ptr() as usize;
+    write_word(
+        heap,
+        continuation_table_ptr_offset,
+        make_pointer(continuation_table_ptr),
+    );
+
+    // Write region 0 pointer as the very last word in static heap
+    let region0_ptr = region0_offset + heap.as_ptr() as usize;
+    write_word(heap, region0_ptr_offset, make_pointer(region0_ptr));
+}
diff --git a/rts/motoko-rts-tests/src/gc/heap/enhanced.rs b/rts/motoko-rts-tests/src/gc/heap/enhanced.rs
new file mode 100644
index 00000000000..853a468f308
--- /dev/null
+++ b/rts/motoko-rts-tests/src/gc/heap/enhanced.rs
@@ -0,0 +1,285 @@
+use super::{heap_size_for_gc, MotokoHeapInner};
+
+use crate::gc::utils::{make_pointer, make_scalar, write_word, ObjectIdx, GC, WORD_SIZE};
+
+use motoko_rts::types::*;
+
+use fxhash::{FxHashMap, FxHashSet};
+
+pub(super) fn new_heap(
+    map: &[(ObjectIdx, Vec)],
+    roots: &[ObjectIdx],
+    continuation_table: &[ObjectIdx],
+    free_space: usize,
+) -> MotokoHeapInner {
+    // Check test correctness: an object should appear at most once in `map`
+    {
+        let heap_objects: FxHashSet = map.iter().map(|(obj, _)| *obj).collect();
+        assert_eq!(
+            heap_objects.len(),
+            map.len(),
+            "Invalid test heap: some objects appear multiple times"
+        );
+    }
+
+    // Three pointers: Static root array, continuation table, and region 0.
+    let root_pointers_size_bytes = 3 * WORD_SIZE;
+
+    // Each object will have array header plus one word for id per object + one word for each reference.
+    // The static root is an array (header + length) with one element, one MutBox for each static variable.
+    let static_root_set_size_bytes = (size_of::().as_usize()
+        + roots.len()
+        + roots.len() * size_of::().as_usize())
+        * WORD_SIZE;
+
+    let dynamic_heap_size_without_roots = {
+        let object_headers_words = map.len() * (size_of::().as_usize() + 1);
+        let references_words = map.iter().map(|(_, refs)| refs.len()).sum::();
+        (object_headers_words + references_words) * WORD_SIZE
+    };
+
+    let continuation_table_size = (size_of::() + Words(continuation_table.len()))
+        .to_bytes()
+        .as_usize();
+
+    let region0_size = size_of::().to_bytes().as_usize();
+
+    let dynamic_heap_size_bytes = dynamic_heap_size_without_roots
+        + static_root_set_size_bytes
+        + continuation_table_size
+        + region0_size;
+
+    let total_heap_size_bytes = root_pointers_size_bytes + dynamic_heap_size_bytes;
+
+    let heap_size = heap_size_for_gc(GC::Incremental, total_heap_size_bytes, map.len());
+
+    const HEAP_ALIGNMENT: usize = usize::BITS as usize;
+    // The worst-case unalignment is one word less than the intended heap alignment
+    // (assuming that we have general word alignment). So we over-allocate `HEAP_ALIGNMENT - WORD_SIZE` bytes.
+    let mut heap = vec![0u8; heap_size + HEAP_ALIGNMENT - WORD_SIZE + free_space];
+
+    // Align the dynamic heap start.
+    let realign = (HEAP_ALIGNMENT
+        - (heap.as_ptr() as usize + root_pointers_size_bytes) % HEAP_ALIGNMENT)
+        % HEAP_ALIGNMENT;
+    assert_eq!(realign % WORD_SIZE, 0);
+
+    // Maps `ObjectIdx`s into their offsets in the heap.
+    let (static_root_array_address, continuation_table_address, region0_address) =
+        create_dynamic_heap(
+            map,
+            roots,
+            continuation_table,
+            &mut heap[root_pointers_size_bytes + realign..heap_size + realign],
+        );
+
+    // Root pointers in static memory space.
+    let static_root_array_variable_offset = root_pointers_size_bytes - 3 * WORD_SIZE;
+    let continuation_table_variable_offset = root_pointers_size_bytes - 2 * WORD_SIZE;
+    let region0_pointer_variable_offset = root_pointers_size_bytes - WORD_SIZE;
+    create_static_memory(
+        static_root_array_variable_offset,
+        continuation_table_variable_offset,
+        region0_pointer_variable_offset,
+        static_root_array_address,
+        continuation_table_address,
+        region0_address,
+        &mut heap[realign..root_pointers_size_bytes + realign],
+    );
+
+    MotokoHeapInner {
+        heap: heap.into_boxed_slice(),
+        heap_base_offset: root_pointers_size_bytes + realign,
+        _heap_ptr_last: root_pointers_size_bytes + realign,
+        heap_ptr_offset: total_heap_size_bytes + realign,
+        static_root_array_variable_offset: static_root_array_variable_offset + realign,
+        continuation_table_variable_offset: continuation_table_variable_offset + realign,
+        region0_pointer_variable_offset: region0_pointer_variable_offset + realign,
+    }
+}
+
+/// Given a heap description (as a map from objects to objects), and the dynamic part of the heap
+/// (as an array), initialize the dynamic heap with objects.
+///
+/// Returns a pair containing the address of the static root array and the address of the continuation table.
+fn create_dynamic_heap(
+    refs: &[(ObjectIdx, Vec)],
+    static_roots: &[ObjectIdx],
+    continuation_table: &[ObjectIdx],
+    dynamic_heap: &mut [u8],
+) -> (usize, usize, usize) {
+    let heap_start = dynamic_heap.as_ptr() as usize;
+
+    // Maps objects to their addresses
+    let mut object_addrs: FxHashMap = Default::default();
+
+    // First pass allocates objects without fields
+    {
+        let mut heap_offset = 0;
+        for (obj, refs) in refs {
+            object_addrs.insert(*obj, heap_start + heap_offset);
+
+            // Store object header
+            let address = heap_start + heap_offset;
+            write_word(dynamic_heap, heap_offset, TAG_ARRAY_M);
+            heap_offset += WORD_SIZE;
+
+            write_word(dynamic_heap, heap_offset, make_pointer(address)); // forwarding pointer
+            heap_offset += WORD_SIZE;
+
+            // Store length: idx + refs
+            write_word(dynamic_heap, heap_offset, refs.len() + 1);
+            heap_offset += WORD_SIZE;
+
+            // Store object value (idx)
+            write_word(dynamic_heap, heap_offset, make_scalar(*obj));
+            heap_offset += WORD_SIZE;
+
+            // Leave space for the fields
+            heap_offset += refs.len() * WORD_SIZE;
+        }
+    }
+
+    // println!("object addresses={:#?}", object_addrs);
+
+    // Second pass adds fields
+    for (obj, refs) in refs {
+        let obj_offset = object_addrs.get(obj).unwrap() - heap_start;
+        for (ref_idx, ref_) in refs.iter().enumerate() {
+            let ref_addr = make_pointer(*object_addrs.get(ref_).unwrap());
+            let field_offset = obj_offset
+                + (size_of::() + Words(1 + ref_idx))
+                    .to_bytes()
+                    .as_usize();
+            write_word(dynamic_heap, field_offset, ref_addr);
+        }
+    }
+
+    // Add the static root table
+    let n_objects = refs.len();
+    // fields+1 for the scalar field (idx)
+    let n_fields: usize = refs.iter().map(|(_, fields)| fields.len() + 1).sum();
+    let root_section_offset =
+        (size_of::() * n_objects).to_bytes().as_usize() + n_fields * WORD_SIZE;
+
+    let mut heap_offset = root_section_offset;
+    let mut root_mutboxes = vec![];
+    {
+        for root_id in static_roots {
+            let mutbox_address = heap_start + heap_offset;
+            root_mutboxes.push(mutbox_address);
+            write_word(dynamic_heap, heap_offset, TAG_MUTBOX);
+            heap_offset += WORD_SIZE;
+
+            write_word(dynamic_heap, heap_offset, make_pointer(mutbox_address));
+            heap_offset += WORD_SIZE;
+
+            let root_ptr = *object_addrs.get(root_id).unwrap();
+            write_word(dynamic_heap, heap_offset, make_pointer(root_ptr));
+            heap_offset += WORD_SIZE;
+        }
+    }
+    let static_root_array_address = heap_start + heap_offset;
+    {
+        write_word(dynamic_heap, heap_offset, TAG_ARRAY_M);
+        heap_offset += WORD_SIZE;
+
+        write_word(
+            dynamic_heap,
+            heap_offset,
+            make_pointer(static_root_array_address),
+        );
+        heap_offset += WORD_SIZE;
+
+        assert_eq!(static_roots.len(), root_mutboxes.len());
+        write_word(dynamic_heap, heap_offset, root_mutboxes.len());
+        heap_offset += WORD_SIZE;
+
+        for mutbox_address in root_mutboxes {
+            write_word(dynamic_heap, heap_offset, make_pointer(mutbox_address));
+            heap_offset += WORD_SIZE;
+        }
+    }
+
+    let continuation_table_address = heap_start + heap_offset;
+    {
+        write_word(dynamic_heap, heap_offset, TAG_ARRAY_M);
+        heap_offset += WORD_SIZE;
+
+        write_word(
+            dynamic_heap,
+            heap_offset,
+            make_pointer(continuation_table_address),
+        );
+        heap_offset += WORD_SIZE;
+
+        write_word(dynamic_heap, heap_offset, continuation_table.len());
+        heap_offset += WORD_SIZE;
+
+        for idx in continuation_table {
+            let idx_ptr = *object_addrs.get(idx).unwrap();
+            write_word(dynamic_heap, heap_offset, make_pointer(idx_ptr));
+            heap_offset += WORD_SIZE;
+        }
+    }
+
+    // Add region0
+    let region0_address = heap_start + heap_offset;
+    {
+        write_word(dynamic_heap, heap_offset, TAG_REGION);
+        heap_offset += WORD_SIZE;
+
+        write_word(dynamic_heap, heap_offset, make_pointer(region0_address));
+        heap_offset += WORD_SIZE;
+
+        // lower part of region id
+        write_word(dynamic_heap, heap_offset, 0);
+        heap_offset += WORD_SIZE;
+        // upper part of region id
+        write_word(dynamic_heap, heap_offset, 0);
+        heap_offset += WORD_SIZE;
+        // zero pages
+        write_word(dynamic_heap, heap_offset, 0);
+        heap_offset += WORD_SIZE;
+        // Simplification: Skip the vector pages blob
+        write_word(dynamic_heap, heap_offset, make_scalar(0));
+    }
+
+    (
+        static_root_array_address,
+        continuation_table_address,
+        region0_address,
+    )
+}
+
+/// Static memory part containing the root pointers.
+fn create_static_memory(
+    static_root_array_variable_offset: usize,
+    continuation_table_variable_offset: usize,
+    region0_pointer_variable_offset: usize,
+    static_root_array_address: usize,
+    continuation_table_address: usize,
+    region0_address: usize,
+    heap: &mut [u8],
+) {
+    // Write static array pointer as the third last word in static memory
+    write_word(
+        heap,
+        static_root_array_variable_offset,
+        make_pointer(static_root_array_address),
+    );
+
+    // Write continuation table pointer as the second last word in static memory
+    write_word(
+        heap,
+        continuation_table_variable_offset,
+        make_pointer(continuation_table_address),
+    );
+
+    // Write region 0 pointer as the very last word in static memory
+    write_word(
+        heap,
+        region0_pointer_variable_offset,
+        make_pointer(region0_address),
+    );
+}
diff --git a/rts/motoko-rts-tests/src/gc/incremental/array_slicing.rs b/rts/motoko-rts-tests/src/gc/incremental/array_slicing.rs
index 72086c92f85..df2060a222c 100644
--- a/rts/motoko-rts-tests/src/gc/incremental/array_slicing.rs
+++ b/rts/motoko-rts-tests/src/gc/incremental/array_slicing.rs
@@ -24,7 +24,7 @@ pub unsafe fn test() {
     }
 }
 
-unsafe fn test_array_slicing(mem: &mut TestMemory, tag: Tag, array_length: u32) {
+unsafe fn test_array_slicing(mem: &mut TestMemory, tag: Tag, array_length: usize) {
     let array = alloc_array(mem, tag, array_length).as_array();
     let mut last_offset = 0;
     loop {
diff --git a/rts/motoko-rts-tests/src/gc/incremental/mark_bitmap.rs b/rts/motoko-rts-tests/src/gc/incremental/mark_bitmap.rs
index 163ab7d3f82..4df813cff71 100644
--- a/rts/motoko-rts-tests/src/gc/incremental/mark_bitmap.rs
+++ b/rts/motoko-rts-tests/src/gc/incremental/mark_bitmap.rs
@@ -14,7 +14,7 @@ use proptest::test_runner::{Config, TestCaseResult, TestRunner};
 
 pub unsafe fn test() {
     println!("  Testing mark bitmap ...");
-    let bitmap_size = Bytes(PARTITION_SIZE as u32).to_words();
+    let bitmap_size = Bytes(PARTITION_SIZE).to_words();
     let mut mem = TestMemory::new(bitmap_size);
     let bitmap_pointer = mem.alloc_words(bitmap_size);
 
@@ -34,32 +34,34 @@ pub unsafe fn test() {
 
     println!("  Testing bit iteration");
     proptest_runner
-        .run(&bit_index_set_strategy(), |bits| {
-            test_iterator_proptest(bitmap_pointer, bits)
+        .run(&bit_index_vector_strategy(), |bits| {
+            let mut hash_set = HashSet::new();
+            for value in bits {
+                hash_set.insert(value);
+            }
+            test_iterator_proptest(bitmap_pointer, hash_set)
         })
         .unwrap();
 
     test_last_bit(bitmap_pointer);
 }
 
-fn bit_index_vector_strategy() -> impl Strategy<Value = Vec<u16>> {
-    proptest::collection::vec(0u16..u16::MAX, 0..1_000)
-}
+const MAX_TEST_BIT_INDEX: usize = u16::MAX as usize;
 
-fn bit_index_set_strategy() -> impl Strategy<Value = HashSet<u16>> {
-    proptest::collection::hash_set(0u16..u16::MAX, 0..1_000)
+fn bit_index_vector_strategy() -> impl Strategy<Value = Vec<usize>> {
+    proptest::collection::vec(0..MAX_TEST_BIT_INDEX, 0..1_000)
 }
 
-fn test_mark_proptest(bitmap_pointer: Value, bits: Vec<u16>) -> TestCaseResult {
+fn test_mark_proptest(bitmap_pointer: Value, bits: Vec<usize>) -> TestCaseResult {
     test_mark(bitmap_pointer, bits);
     Ok(())
 }
 
-fn address_of_bit(bit: u16) -> usize {
-    bit as usize * WORD_SIZE as usize
+fn address_of_bit(bit: usize) -> usize {
+    bit * WORD_SIZE
 }
 
-fn test_mark(bitmap_pointer: Value, mut bits: Vec<u16>) {
+fn test_mark(bitmap_pointer: Value, mut bits: Vec<usize>) {
     unsafe {
         let mut bitmap = MarkBitmap::new();
         bitmap.assign(bitmap_pointer.get_ptr() as *mut u8);
@@ -71,7 +73,7 @@ fn test_mark(bitmap_pointer: Value, mut bits: Vec<u16>) {
             assert!(bitmap.is_marked(address_of_bit(*bit)));
         }
         bits.sort();
-        let mut last_bit: Option<u16> = None;
+        let mut last_bit: Option<usize> = None;
         for bit in bits {
             if let Some(last_bit) = last_bit {
                 for i in last_bit + 1..bit {
@@ -85,12 +87,12 @@ fn test_mark(bitmap_pointer: Value, mut bits: Vec<u16>) {
     }
 }
 
-fn test_iterator_proptest(bitmap_pointer: Value, bits: HashSet<u16>) -> TestCaseResult {
+fn test_iterator_proptest(bitmap_pointer: Value, bits: HashSet<usize>) -> TestCaseResult {
     test_iterator(bitmap_pointer, bits);
     Ok(())
 }
 
-fn test_iterator(bitmap_pointer: Value, bits: HashSet<u16>) {
+fn test_iterator(bitmap_pointer: Value, bits: HashSet<usize>) {
     unsafe {
         let mut bitmap = MarkBitmap::new();
         bitmap.assign(bitmap_pointer.get_ptr() as *mut u8);
@@ -117,7 +119,7 @@ fn test_iterator(bitmap_pointer: Value, bits: HashSet<u16>) {
 }
 
 fn test_last_bit(bitmap_pointer: Value) {
-    const LAST_OFFSET: usize = PARTITION_SIZE - WORD_SIZE as usize;
+    const LAST_OFFSET: usize = PARTITION_SIZE - WORD_SIZE;
     unsafe {
         let mut bitmap = MarkBitmap::new();
         bitmap.assign(bitmap_pointer.get_ptr() as *mut u8);
diff --git a/rts/motoko-rts-tests/src/gc/incremental/mark_stack.rs b/rts/motoko-rts-tests/src/gc/incremental/mark_stack.rs
index 373c9432e52..fca830d9653 100644
--- a/rts/motoko-rts-tests/src/gc/incremental/mark_stack.rs
+++ b/rts/motoko-rts-tests/src/gc/incremental/mark_stack.rs
@@ -33,12 +33,12 @@ unsafe fn internal_push_pop(
     regrow_step: usize,
 ) {
     for count in 0..amount {
-        stack.push(mem, Value::from_scalar(count as u32));
+        stack.push(mem, Value::from_scalar(count));
         if count == regrow_step {
             internal_push_pop(mem, stack, amount - count, regrow_step);
         }
     }
     for count in (0..amount).rev() {
-        assert_eq!(stack.pop().get_scalar() as usize, count);
+        assert_eq!(stack.pop().get_scalar(), count);
     }
 }
diff --git a/rts/motoko-rts-tests/src/gc/incremental/partitioned_heap.rs b/rts/motoko-rts-tests/src/gc/incremental/partitioned_heap.rs
index cac98d79f02..e9d82289f77 100644
--- a/rts/motoko-rts-tests/src/gc/incremental/partitioned_heap.rs
+++ b/rts/motoko-rts-tests/src/gc/incremental/partitioned_heap.rs
@@ -23,7 +23,8 @@ use motoko_rts::{
 
 use crate::{gc::utils::WORD_SIZE, memory::TestMemory};
 
-const NUMBER_OF_OBJECTS: usize = 2 * PARTITION_SIZE / 16;
+const OBJECT_SIZE: usize = size_of::<Array>() + WORD_SIZE;
+const NUMBER_OF_OBJECTS: usize = 2 * PARTITION_SIZE / OBJECT_SIZE;
 const HEAP_SIZE: usize = 4 * PARTITION_SIZE;
 
 pub unsafe fn test() {
@@ -103,10 +104,9 @@ unsafe fn iterate_partition(
 ) {
     while iterator.has_object() {
         let object = iterator.current_object();
-        //println!("FOUND {:#x} {}", object as usize, set.len());
         let array = Value::from_ptr(object as usize).as_array();
         let content = array.get(0).get_scalar();
-        let inserted = set.insert(content as usize);
+        let inserted = set.insert(content);
         assert!(inserted);
         progress(set.len(), NUMBER_OF_OBJECTS);
         assert_eq!(partition.get_index(), object as usize / PARTITION_SIZE);
@@ -121,7 +121,7 @@ unsafe fn iterate_partition(
 unsafe fn test_evacuation_plan(heap: &mut PartitionedTestHeap, occupied_partitions: usize) {
     println!("    Test evacuation plan...");
     unmark_all_objects(heap);
-    heap.inner.plan_evacuations();
+    heap.inner.plan_evacuations(&mut heap.memory);
     let mut iterator = PartitionedHeapIterator::new(&heap.inner);
     while iterator.has_partition() {
         let partition = iterator.current_partition(&heap.inner);
@@ -153,8 +153,6 @@ unsafe fn test_freeing_partitions(heap: &mut PartitionedTestHeap, occupied_parti
     heap.inner.complete_collection();
 }
 
-const OBJECT_SIZE: usize = size_of::<Array>() + WORD_SIZE;
-
 unsafe fn test_reallocations(heap: &mut PartitionedTestHeap) {
     println!("    Test reallocations...");
     let mut time = BoundedTime::new(0);
@@ -192,7 +190,7 @@ unsafe fn count_objects_in_partition(
         let object = iterator.current_object();
         assert_eq!(partition.get_index(), object as usize / PARTITION_SIZE);
         let array = Value::from_ptr(object as usize).as_array();
-        let content = array.get(0).get_scalar() as usize;
+        let content = array.get(0).get_scalar();
         assert!(content < NUMBER_OF_OBJECTS);
         time.tick();
         count += 1;
@@ -206,6 +204,11 @@ unsafe fn count_objects_in_partition(
 
 fn test_close_partition(heap: &mut PartitionedTestHeap) {
     println!("    Test close partition...");
+    // Due to Rust borrow check restrictions, preceding `plan_evacuations`
+    // allocates directly in `heap.memory` without synchronizing the heap
+    // pointer in `heap`. Therefore, re-align the heap pointer of `heap`
+    // with `heap.memory`.
+    heap.allocate_blob(0);
     test_close_partition_multi_word(heap);
     test_close_partition_single_word(heap);
 }
@@ -253,19 +256,28 @@ unsafe fn test_large_size_scenario() {
     println!("    Test large allocations...");
     const LARGE: usize = PARTITION_SIZE + WORD_SIZE;
     const EXTRA_LARGE: usize = 2 * PARTITION_SIZE;
-    test_allocation_sizes(&[32, PARTITION_SIZE, 16], 3);
-    test_allocation_sizes(&[28, LARGE, 20], 3);
-    test_allocation_sizes(&[24, LARGE, LARGE, 36], 5);
-    test_allocation_sizes(&[24, EXTRA_LARGE, 16], 3);
-    test_allocation_sizes(&[24, EXTRA_LARGE, LARGE, 16], 6);
-    test_allocation_sizes(&[24, EXTRA_LARGE, 32, LARGE, 16], 6);
+    test_allocation_sizes(&[8 * WORD_SIZE, PARTITION_SIZE, 4 * WORD_SIZE], 3);
+    test_allocation_sizes(&[7 * WORD_SIZE, LARGE, 5 * WORD_SIZE], 3);
+    test_allocation_sizes(&[6 * WORD_SIZE, LARGE, LARGE, 9 * WORD_SIZE], 5);
+    test_allocation_sizes(&[6 * WORD_SIZE, EXTRA_LARGE, 4 * WORD_SIZE], 3);
+    test_allocation_sizes(&[6 * WORD_SIZE, EXTRA_LARGE, LARGE, 4 * WORD_SIZE], 6);
+    test_allocation_sizes(
+        &[
+            6 * WORD_SIZE,
+            EXTRA_LARGE,
+            8 * WORD_SIZE,
+            LARGE,
+            4 * WORD_SIZE,
+        ],
+        6,
+    );
 }
 
 unsafe fn test_allocation_sizes(sizes: &[usize], number_of_partitions: usize) {
     let total_partitions = number_of_partitions + 1; // Plus temporary partition.
     let mut heap = PartitionedTestHeap::new(total_partitions * PARTITION_SIZE);
     let heap_base = heap.heap_base();
-    let state = IncrementalGC::initial_gc_state(&mut heap, heap_base);
+    let state = IncrementalGC::<PartitionedTestHeap>::initial_gc_state(heap_base);
     set_incremental_gc_state(Some(state));
     assert!(heap.inner.occupied_size().as_usize() < PARTITION_SIZE + heap.heap_base());
     let mut time = BoundedTime::new(0);
@@ -283,7 +295,7 @@ unsafe fn test_allocation_sizes(sizes: &[usize], number_of_partitions: usize) {
     );
     iterate_large_objects(&heap.inner, sizes);
     unmark_all_objects(&mut heap);
-    heap.inner.plan_evacuations();
+    heap.inner.plan_evacuations(&mut heap.memory);
     heap.inner.collect_large_objects();
     heap.inner.complete_collection();
     heap.inner.start_collection(&mut heap.memory, &mut time);
@@ -343,7 +355,7 @@ unsafe fn create_test_heap() -> PartitionedTestHeap {
     println!("    Create test heap...");
     let mut heap = PartitionedTestHeap::new(HEAP_SIZE);
     let heap_base = heap.heap_base();
-    let state = IncrementalGC::initial_gc_state(&mut heap, heap_base);
+    let state = IncrementalGC::<PartitionedTestHeap>::initial_gc_state(heap_base);
     set_incremental_gc_state(Some(state));
     let mut time = BoundedTime::new(0);
     unsafe {
@@ -361,7 +373,7 @@ unsafe fn create_test_heap() -> PartitionedTestHeap {
 fn allocate_objects(heap: &mut PartitionedTestHeap) {
     for index in 0..NUMBER_OF_OBJECTS {
         progress(index + 1, NUMBER_OF_OBJECTS);
-        let value = Value::from_scalar(index as u32);
+        let value = Value::from_scalar(index);
         let array = heap.allocate_array(&[value]);
         unsafe {
             let object = array.get_ptr() as *mut Obj;
@@ -393,9 +405,10 @@ pub struct PartitionedTestHeap {
 
 impl PartitionedTestHeap {
     pub fn new(size: usize) -> PartitionedTestHeap {
-        let mut memory = TestMemory::new(Bytes(size as u32).to_words());
+        let memory = TestMemory::new(Bytes(size).to_words());
         let heap_base = memory.heap_base();
-        let inner = unsafe { PartitionedHeap::new(&mut memory, heap_base) };
+        let inner = PartitionedHeap::new(heap_base);
+        assert_eq!(inner.base_address(), heap_base);
         PartitionedTestHeap { memory, inner }
     }
 
@@ -409,24 +422,24 @@ impl PartitionedTestHeap {
 
     pub fn allocate_array(&mut self, elements: &[Value]) -> Value {
         unsafe {
-            let array = alloc_array(self, TAG_ARRAY_M, elements.len() as u32);
+            let array = alloc_array(self, TAG_ARRAY_M, elements.len());
             for index in 0..elements.len() {
                 let raw_array = array.as_array();
-                raw_array.set(index as u32, elements[index], self);
+                raw_array.set(index, elements[index], self);
             }
             array
         }
     }
 
     pub fn allocate_blob(&mut self, size: usize) -> Value {
-        unsafe { alloc_blob(self, TAG_BLOB_B, Bytes(size as u32)) }
+        unsafe { alloc_blob(self, TAG_BLOB_B, Bytes(size)) }
     }
 }
 
 unsafe fn block_size(block: *const Tag) -> usize {
     match *block {
         TAG_ARRAY_I | TAG_ARRAY_M | TAG_ARRAY_T | TAG_ARRAY_S => {
-            size_of::<Array>() + (block as *const Array).len() as usize * WORD_SIZE as usize
+            size_of::<Array>() + (block as *const Array).len() * WORD_SIZE
         }
         TAG_BLOB_B | TAG_BLOB_T | TAG_BLOB_P | TAG_BLOB_A => {
             size_of::<Blob>() + (block as *const Blob).len().as_usize()
@@ -436,14 +449,14 @@ unsafe fn block_size(block: *const Tag) -> usize {
 }
 
 impl Memory for PartitionedTestHeap {
-    unsafe fn alloc_words(&mut self, size: Words<u32>) -> Value {
+    unsafe fn alloc_words(&mut self, size: Words<usize>) -> Value {
         let result = self.inner.allocate(&mut self.memory, size);
         self.memory
             .set_heap_pointer(result.get_ptr() + size.to_bytes().as_usize());
         result
     }
 
-    unsafe fn grow_memory(&mut self, ptr: u64) {
-        assert!(ptr as usize <= self.memory.heap_end());
+    unsafe fn grow_memory(&mut self, ptr: usize) {
+        assert!(ptr <= self.memory.heap_end());
     }
 }
diff --git a/rts/motoko-rts-tests/src/gc/incremental/roots.rs b/rts/motoko-rts-tests/src/gc/incremental/roots.rs
index 41f6fe195a7..52fe1fa0b24 100644
--- a/rts/motoko-rts-tests/src/gc/incremental/roots.rs
+++ b/rts/motoko-rts-tests/src/gc/incremental/roots.rs
@@ -1,3 +1,7 @@
+use motoko_rts_macros::{
+    classical_persistence, enhanced_orthogonal_persistence, uses_enhanced_orthogonal_persistence,
+};
+
 use std::{array::from_fn, mem::size_of, ptr::null_mut};
 
 use motoko_rts::{
@@ -7,17 +11,23 @@ use motoko_rts::{
 
 use crate::gc::{
     heap::MotokoHeap,
-    utils::{ObjectIdx, WORD_SIZE},
+    utils::{ObjectIdx, GC, WORD_SIZE},
 };
 
 pub unsafe fn test() {
     println!("  Testing roots...");
 
-    let object_map: [(ObjectIdx, Vec<ObjectIdx>); 10] = from_fn(|id| (id as u32, vec![]));
+    let object_map: [(ObjectIdx, Vec<ObjectIdx>); 10] = from_fn(|id| (id, vec![]));
     let root_ids = [2, 4, 6, 8];
     let continuation_ids = [3, 5, 7];
 
-    let heap = MotokoHeap::new(&object_map, &root_ids, &continuation_ids);
+    let heap = MotokoHeap::new(
+        &object_map,
+        &root_ids,
+        &continuation_ids,
+        GC::Incremental,
+        0,
+    );
     check_visit_static_roots(&heap, &root_ids);
     check_visit_continuation_table(&heap, &continuation_ids);
     check_visit_region0(&heap);
@@ -26,57 +36,93 @@ pub unsafe fn test() {
 unsafe fn check_visit_static_roots(heap: &MotokoHeap, root_ids: &[ObjectIdx]) {
     let roots = get_roots(heap);
     let mut visited_static_roots = vec![];
-    visit_roots(roots, &mut visited_static_roots, |context, field| {
-        let object = *field;
-        if object.tag() != TAG_REGION {
-            let array = object.as_array();
-            if array.len() == root_ids.len() as u32 {
-                for index in 0..array.len() {
-                    let mutbox_value = array.get(index);
-                    let mutbox = mutbox_value.as_mutbox();
-                    let root_address = (*mutbox).field.get_ptr();
-                    let root_id = object_id(heap, root_address);
-                    context.push(root_id);
+    visit_roots(
+        roots,
+        heap.heap_base_address(),
+        &mut visited_static_roots,
+        |context, field| {
+            let object = *field;
+            if object.tag() != TAG_REGION {
+                let array = object.as_array();
+                if uses_enhanced_orthogonal_persistence!() {
+                    if array.len() == root_ids.len() {
+                        for index in 0..array.len() {
+                            let mutbox_value = array.get(index);
+                            let mutbox = mutbox_value.as_mutbox();
+                            let root_address = (*mutbox).field.get_ptr();
+                            let root_id = object_id(heap, root_address);
+                            context.push(root_id);
+                        }
+                    }
+                } else {
+                    if array.len() == 1 {
+                        let id = object_id(&heap, array as usize);
+                        context.push(id);
+                    }
                 }
             }
-        }
-    });
+        },
+    );
     assert_eq!(visited_static_roots, root_ids);
 }
 
 unsafe fn check_visit_continuation_table(heap: &MotokoHeap, continuation_ids: &[ObjectIdx]) {
     let roots = get_roots(heap);
     let mut visited_continuations = vec![];
-    visit_roots(roots, &mut visited_continuations, |context, field| {
-        let object = *field;
-        if object.tag() != TAG_REGION {
-            let array = object.as_array();
-            if array.len() == continuation_ids.len() as u32 {
-                assert_eq!(context.len(), 0);
-                for index in 0..array.len() {
-                    let element = array.get(index);
-                    let id = object_id(&heap, element.get_ptr());
-                    context.push(id);
+    visit_roots(
+        roots,
+        heap.heap_base_address(),
+        &mut visited_continuations,
+        |context, field| {
+            let object = *field;
+            if object.tag() != TAG_REGION {
+                let array = object.as_array();
+                if array.len() == continuation_ids.len() {
+                    assert_eq!(context.len(), 0);
+                    for index in 0..array.len() {
+                        let element = array.get(index);
+                        let id = object_id(&heap, element.get_ptr());
+                        context.push(id);
+                    }
                 }
             }
-        }
-    });
+        },
+    );
     assert_eq!(visited_continuations, continuation_ids);
 }
 
 unsafe fn check_visit_region0(heap: &MotokoHeap) {
     let roots = get_roots(heap);
     let mut visited_region0 = false;
-    visit_roots(roots, &mut visited_region0, |visited, field| {
-        let object = *field;
-        if object.tag() == TAG_REGION {
-            assert!(!*visited);
-            *visited = true;
-        }
-    });
+    visit_roots(
+        roots,
+        heap.heap_base_address(),
+        &mut visited_region0,
+        |visited, field| {
+            let object = *field;
+            if object.tag() == TAG_REGION {
+                assert!(!*visited);
+                *visited = true;
+            }
+        },
+    );
     assert!(visited_region0);
 }
 
+#[classical_persistence]
+unsafe fn get_roots(heap: &MotokoHeap) -> Roots {
+    let static_roots = Value::from_ptr(heap.static_root_array_variable_address());
+    let continuation_table_location = heap.continuation_table_variable_address() as *mut Value;
+    let region0_ptr_location = heap.region0_pointer_variable_address() as *mut Value;
+    assert_ne!(continuation_table_location, null_mut());
+    Roots {
+        static_roots,
+        continuation_table_location,
+        region0_ptr_location,
+    }
+}
+
+#[enhanced_orthogonal_persistence]
 unsafe fn get_roots(heap: &MotokoHeap) -> Roots {
     let static_root = heap.static_root_array_variable_address() as *mut Value;
     let continuation_table_location = heap.continuation_table_variable_address() as *mut Value;
@@ -93,10 +139,9 @@ unsafe fn get_roots(heap: &MotokoHeap) -> Roots {
     ]
 }
 
-fn object_id(heap: &MotokoHeap, address: usize) -> u32 {
+fn object_id(heap: &MotokoHeap, address: usize) -> usize {
     let offset = address - heap.heap_base_address();
     const OBJECT_SIZE: usize = size_of::<Array>() + WORD_SIZE;
-    assert_eq!(OBJECT_SIZE, 16);
     assert_eq!(offset % OBJECT_SIZE, 0);
-    (offset / OBJECT_SIZE) as u32
+    offset / OBJECT_SIZE
 }
diff --git a/rts/motoko-rts-tests/src/gc/incremental/sort.rs b/rts/motoko-rts-tests/src/gc/incremental/sort.rs
index 1e6f92dc885..e641753110e 100644
--- a/rts/motoko-rts-tests/src/gc/incremental/sort.rs
+++ b/rts/motoko-rts-tests/src/gc/incremental/sort.rs
@@ -34,7 +34,11 @@ pub fn test() {
 }
 
 fn sort_test(array: &mut [usize]) {
-    sort(array, &|left, right| left.cmp(&right));
+    unsafe {
+        sort(array.as_mut_ptr(), array.len(), &|left, right| {
+            left.cmp(&right)
+        });
+    }
     check_sorted(array);
 }
 
diff --git a/rts/motoko-rts-tests/src/gc/random.rs b/rts/motoko-rts-tests/src/gc/random.rs
index 7be7fc41c4a..9afff801203 100644
--- a/rts/motoko-rts-tests/src/gc/random.rs
+++ b/rts/motoko-rts-tests/src/gc/random.rs
@@ -7,28 +7,28 @@ fn rand_bool(rng: &mut Rand32) -> bool {
     rng.rand_range(0..2) == 1
 }
 
-pub(super) fn generate(seed: u64, max_objects: u32) -> TestHeap {
+pub fn generate(seed: u64, max_objects: usize) -> TestHeap {
     let mut rng = Rand32::new(seed);
 
-    let n_objects = rng.rand_range(0..max_objects + 1);
+    let n_objects = rng.rand_range(0..max_objects as u32 + 1);
 
     let roots: Vec<ObjectIdx> = (0..n_objects)
         .filter_map(|obj_idx| {
             if rand_bool(&mut rng) {
-                Some(obj_idx)
+                Some(obj_idx as usize)
             } else {
                 None
             }
         })
         .collect();
 
-    let heap: Vec<(ObjectIdx, Vec<ObjectIdx>)> = (0..n_objects)
+    let heap: Vec<(ObjectIdx, Vec<ObjectIdx>)> = (0..n_objects as usize)
         .map(|obj_idx| {
-            let n_fields = rng.rand_range(0..n_objects);
+            let n_fields = rng.rand_range(0..n_objects) as usize;
 
             let field_values = (0..n_fields)
                 .filter_map(|_field_idx| {
-                    let field_value = rng.rand_range(0..n_objects);
+                    let field_value = rng.rand_range(0..n_objects) as usize;
                     if field_value == obj_idx {
                         None
                     } else {
@@ -42,7 +42,7 @@ pub(super) fn generate(seed: u64, max_objects: u32) -> TestHeap {
         .collect();
 
     // Same as roots
-    let continuation_table: Vec<ObjectIdx> = (0..n_objects)
+    let continuation_table: Vec<ObjectIdx> = (0..n_objects as usize)
         .filter_map(|obj_idx| {
             if rand_bool(&mut rng) {
                 Some(obj_idx)
diff --git a/rts/motoko-rts-tests/src/gc/utils.rs b/rts/motoko-rts-tests/src/gc/utils.rs
index 48d3e6e9063..5e3d84523a9 100644
--- a/rts/motoko-rts-tests/src/gc/utils.rs
+++ b/rts/motoko-rts-tests/src/gc/utils.rs
@@ -1,43 +1,92 @@
+use std::mem::size_of;
+
 use byteorder::{ReadBytesExt, WriteBytesExt, LE};
+use motoko_rts_macros::{
+    classical_persistence, enhanced_orthogonal_persistence, incremental_gc, non_incremental_gc,
+};
 
 /// A unique object index, used in heap descriptions.
 ///
-/// These are written as scalar values in object payloads, so they can be at most 31 bits. Larger
-/// values will cause test failure in `make_scalar` below.
-pub type ObjectIdx = u32;
+/// These are written as scalar values in object payloads, so they can be at most `usize::BITS - 1` bits.
+/// Larger values will cause test failure in `make_scalar` below.
+pub type ObjectIdx = usize;
 
 /// Same as RTS `WORD_SIZE`, but `usize`
 pub const WORD_SIZE: usize = motoko_rts::constants::WORD_SIZE as usize;
 
+// Max allowed size for the mark stack in mark-compact GC tests
+#[non_incremental_gc]
+pub const MAX_MARK_STACK_SIZE: usize = 100;
+
+/// Enum for the GC implementations. GC functions are generic so we can't put them into arrays or
+/// other data types, we use this type instead.
+#[non_incremental_gc]
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum GC {
+    Copying,
+    MarkCompact,
+    Generational,
+}
+
+#[incremental_gc]
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum GC {
+    Incremental,
+}
+
+#[non_incremental_gc]
+pub static GC_IMPLS: [GC; 3] = [GC::Copying, GC::MarkCompact, GC::Generational];
+
+#[incremental_gc]
+pub static GC_IMPLS: [GC; 1] = [GC::Incremental];
+
+/// Read a little-endian (Wasm) word from given offset
+#[classical_persistence]
+pub fn read_word(heap: &[u8], offset: usize) -> usize {
+    assert_eq!(size_of::<usize>(), size_of::<u32>());
+    (&heap[offset..]).read_u32::<LE>().unwrap() as usize
+}
+
+/// Write a little-endian (Wasm) word to given offset
+#[classical_persistence]
+pub fn write_word(heap: &mut [u8], offset: usize, word: usize) {
+    assert_eq!(size_of::<usize>(), size_of::<u32>());
+    (&mut heap[offset..]).write_u32::<LE>(word as u32).unwrap()
+}
+
 /// Read a little-endian (Wasm) word from given offset
-pub fn read_word(heap: &[u8], offset: usize) -> u32 {
-    (&heap[offset..]).read_u32::<LE>().unwrap()
+#[enhanced_orthogonal_persistence]
+pub fn read_word(heap: &[u8], offset: usize) -> usize {
+    assert_eq!(size_of::<usize>(), size_of::<u64>());
+    (&heap[offset..]).read_u64::<LE>().unwrap() as usize
 }
 
 /// Write a little-endian (Wasm) word to given offset
-pub fn write_word(heap: &mut [u8], offset: usize, word: u32) {
-    (&mut heap[offset..]).write_u32::<LE>(word).unwrap()
+#[enhanced_orthogonal_persistence]
+pub fn write_word(heap: &mut [u8], offset: usize, word: usize) {
+    assert_eq!(size_of::<usize>(), size_of::<u64>());
+    (&mut heap[offset..]).write_u64::<LE>(word as u64).unwrap()
 }
 
 /// Make a scalar value to be used in heap object payload
-pub fn make_scalar(value: u32) -> u32 {
+pub fn make_scalar(value: usize) -> usize {
     // Scalar values can be at most 31 bits
     assert_eq!(value >> 31, 0);
     value << 1
 }
 
 /// Inverse of `make_scalar`
-pub fn get_scalar_value(scalar: u32) -> u32 {
+pub fn get_scalar_value(scalar: usize) -> usize {
     assert_eq!(scalar & 0b1, 0);
     scalar >> 1
 }
 
 /// Make a pointer value to be used in heap object payload
-pub fn make_pointer(addr: u32) -> u32 {
+pub fn make_pointer(addr: usize) -> usize {
     addr.wrapping_sub(1)
 }
 
 /// Inverse of `make_pointer`
-pub fn unskew_pointer(skewed_ptr: u32) -> u32 {
+pub fn unskew_pointer(skewed_ptr: usize) -> usize {
     skewed_ptr.wrapping_add(1)
 }
diff --git a/rts/motoko-rts-tests/src/leb128.rs b/rts/motoko-rts-tests/src/leb128.rs
index 8dcd53c4fe9..bac32c89d46 100644
--- a/rts/motoko-rts-tests/src/leb128.rs
+++ b/rts/motoko-rts-tests/src/leb128.rs
@@ -3,6 +3,7 @@ use motoko_rts::leb128::{
     leb128_decode_checked, leb128_encode, sleb128_decode_checked, sleb128_encode,
 };
 
+use motoko_rts_macros::{classical_persistence, enhanced_orthogonal_persistence};
 use proptest::test_runner::{Config, TestCaseError, TestCaseResult, TestRunner};
 
 pub unsafe fn test() {
@@ -17,23 +18,27 @@ pub unsafe fn test() {
     roundtrip_signed(1).unwrap();
     roundtrip_signed(0).unwrap();
     roundtrip_signed(-1).unwrap();
-    roundtrip_signed(i32::MIN).unwrap(); // -2147483648
-    roundtrip_signed(i32::MAX).unwrap(); // 2147483647
+    roundtrip_signed(isize::MIN).unwrap();
+    roundtrip_signed(isize::MAX).unwrap();
 
     proptest_runner
-        .run(&proptest::num::i32::ANY, roundtrip_signed)
+        .run(&proptest::num::isize::ANY, roundtrip_signed)
         .unwrap();
 
     roundtrip_unsigned(1).unwrap();
     roundtrip_unsigned(0).unwrap();
-    roundtrip_unsigned(u32::MIN).unwrap();
-    roundtrip_unsigned(u32::MAX).unwrap();
+    roundtrip_unsigned(usize::MIN).unwrap();
+    roundtrip_unsigned(usize::MAX).unwrap();
 
     proptest_runner
-        .run(&proptest::num::u32::ANY, roundtrip_unsigned)
+        .run(&proptest::num::usize::ANY, roundtrip_unsigned)
         .unwrap();
 
-    // Check overflows
+    check_overflows();
+}
+
+#[classical_persistence]
+unsafe fn check_overflows() {
     check_signed_decode_overflow(&[
         0b1111_1111,
         0b1111_1111,
@@ -59,7 +64,49 @@ pub unsafe fn test() {
     ]); // u32::MAX + 1
 }
 
-fn roundtrip_signed(val: i32) -> TestCaseResult {
+#[enhanced_orthogonal_persistence]
+unsafe fn check_overflows() {
+    check_signed_decode_overflow(&[
+        0b1111_1111,
+        0b1111_1111,
+        0b1111_1111,
+        0b1111_1111,
+        0b1111_1111,
+        0b1111_1111,
+        0b1111_1111,
+        0b1111_1111,
+        0b1111_1111,
+        0b0001_0001,
+    ]); // isize::MIN - 1
+
+    check_signed_decode_overflow(&[
+        0b1000_0000,
+        0b1000_0000,
+        0b1000_0000,
+        0b1000_0000,
+        0b1000_0000,
+        0b1000_0000,
+        0b1000_0000,
+        0b1000_0000,
+        0b1000_0000,
+        0b0000_0001,
+    ]); // isize::MAX + 1
+
+    check_unsigned_decode_overflow(&[
+        0b1000_0000,
+        0b1000_0000,
+        0b1000_0000,
+        0b1000_0000,
+        0b1000_0000,
+        0b1000_0000,
+        0b1000_0000,
+        0b1000_0000,
+        0b1000_0000,
+        0b0000_0010,
+    ]); // usize::MAX + 1
+}
+
+fn roundtrip_signed(val: isize) -> TestCaseResult {
     unsafe {
         let mut buf = [0u8; 100];
         sleb128_encode(val, buf.as_mut_ptr());
@@ -86,7 +133,7 @@ fn roundtrip_signed(val: i32) -> TestCaseResult {
     }
 }
 
-fn roundtrip_unsigned(val: u32) -> TestCaseResult {
+fn roundtrip_unsigned(val: usize) -> TestCaseResult {
     unsafe {
         let mut buf = [0u8; 100];
         leb128_encode(val, buf.as_mut_ptr());
diff --git a/rts/motoko-rts-tests/src/main.rs b/rts/motoko-rts-tests/src/main.rs
index 7e0b3e1dcfb..2a88243b4e1 100644
--- a/rts/motoko-rts-tests/src/main.rs
+++ b/rts/motoko-rts-tests/src/main.rs
@@ -1,5 +1,10 @@
 #![feature(proc_macro_hygiene)]
 
+use motoko_rts_macros::{classical_persistence, enhanced_orthogonal_persistence};
+
+#[macro_use]
+mod print;
+
 mod bigint;
 mod bitrel;
 mod continuation_table;
@@ -8,20 +13,17 @@ mod gc;
 mod leb128;
 mod memory;
 mod principal_id;
+
+#[enhanced_orthogonal_persistence]
+mod stabilization;
 mod stable_option;
 mod text;
 mod utf8;
 
-use motoko_rts::types::{read64, write64, Bytes};
-
 fn main() {
-    if std::mem::size_of::<usize>() != 4 {
-        println!("Motoko RTS only works on 32-bit architectures");
-        std::process::exit(1);
-    }
+    check_architecture();
 
     unsafe {
-        test_read_write_64_bit();
         bigint::test();
         bitrel::test();
         continuation_table::test();
@@ -29,13 +31,46 @@ fn main() {
         gc::test();
         leb128::test();
         principal_id::test();
+        persistence_test();
         stable_option::test();
         text::test();
         utf8::test();
     }
 }
 
+#[classical_persistence]
+fn check_architecture() {
+    if std::mem::size_of::<usize>() != 4 {
+        println!("Motoko RTS for classical persistence only works on 32-bit architectures");
+        std::process::exit(1);
+    }
+}
+
+#[enhanced_orthogonal_persistence]
+fn check_architecture() {
+    if std::mem::size_of::<usize>() != 8 {
+        println!(
+            "Motoko RTS for enhanced orthogonal persistence only works on 64-bit architectures"
+        );
+        std::process::exit(1);
+    }
+}
+
+#[enhanced_orthogonal_persistence]
+fn persistence_test() {
+    unsafe {
+        stabilization::test();
+    }
+}
+
+#[classical_persistence]
+fn persistence_test() {
+    test_read_write_64_bit();
+}
+
+#[classical_persistence]
 fn test_read_write_64_bit() {
+    use motoko_rts::types::{read64, write64};
     println!("Testing 64-bit read-write");
     const TEST_VALUE: u64 = 0x1234_5678_9abc_def0;
     let mut lower = 0u32;
@@ -48,8 +83,8 @@ fn test_read_write_64_bit() {
 
 // Called by the RTS to panic
 #[no_mangle]
-extern "C" fn rts_trap(ptr: *const u8, len: Bytes<u32>) -> ! {
-    let msg = unsafe { std::slice::from_raw_parts(ptr, len.as_usize()) };
+extern "C" fn rts_trap(ptr: *const u8, len: u32) -> ! {
+    let msg = unsafe { std::slice::from_raw_parts(ptr, len as usize) };
     match core::str::from_utf8(msg) {
         Err(err) => panic!(
             "rts_trap_with called with non-UTF8 string (error={:?}, string={:?})",
@@ -67,7 +102,14 @@ extern "C" fn bigint_trap() -> ! {
 
 // Called by the RTS for debug prints
 #[no_mangle]
-unsafe extern "C" fn print_ptr(ptr: usize, len: u32) {
-    let str: &[u8] = core::slice::from_raw_parts(ptr as *const u8, len as usize);
-    println!("[RTS] {}", String::from_utf8_lossy(str));
+unsafe extern "C" fn print_ptr(ptr: usize, len: usize) {
+    let str: &[u8] = core::slice::from_raw_parts(ptr as *const u8, len);
+    println!("[RTS] {}", &String::from_utf8_lossy(str));
+}
+
+// Program entry point by wasmtime
+#[enhanced_orthogonal_persistence]
+#[no_mangle]
+pub fn _start() {
+    main();
 }
diff --git a/rts/motoko-rts-tests/src/memory.rs b/rts/motoko-rts-tests/src/memory.rs
index 7e1280a9ae0..73211c52e45 100644
--- a/rts/motoko-rts-tests/src/memory.rs
+++ b/rts/motoko-rts-tests/src/memory.rs
@@ -1,7 +1,6 @@
-use motoko_rts::gc::incremental::partitioned_heap::PARTITION_SIZE;
-use motoko_rts::gc::incremental::{set_incremental_gc_state, IncrementalGC};
 use motoko_rts::memory::Memory;
 use motoko_rts::types::{Bytes, Value, Words};
+use motoko_rts_macros::{incremental_gc, non_incremental_gc};
 
 pub struct TestMemory {
     heap: Box<[u8]>,
@@ -9,25 +8,29 @@ pub struct TestMemory {
 }
 
 impl TestMemory {
-    pub fn new(size: Words<u32>) -> TestMemory {
+    pub fn new(size: Words) -> TestMemory {
         let bytes = size.to_bytes().as_usize();
         let heap = vec![0u8; bytes].into_boxed_slice();
         let hp = heap.as_ptr() as usize;
         TestMemory { heap, hp }
     }
 
+    #[incremental_gc]
     pub fn heap_base(&self) -> usize {
         self.heap.as_ptr() as usize
     }
 
+    #[incremental_gc]
     pub fn heap_end(&self) -> usize {
         self.heap_base() + self.heap.len()
     }
 
+    #[incremental_gc]
     pub fn heap_pointer(&self) -> usize {
         self.hp
     }
 
+    #[incremental_gc]
     pub fn set_heap_pointer(&mut self, heap_pointer: usize) {
         assert!(heap_pointer >= self.heap_base());
         assert!(heap_pointer <= self.heap_end());
@@ -36,7 +39,7 @@ impl TestMemory {
 }
 
 impl Memory for TestMemory {
-    unsafe fn alloc_words(&mut self, n: Words<u32>) -> Value {
+    unsafe fn alloc_words(&mut self, n: Words) -> Value {
         let bytes = n.to_bytes();
 
         // Update heap pointer
@@ -45,12 +48,12 @@ impl Memory for TestMemory {
         self.hp = new_hp;
 
         // Grow memory if needed
-        self.grow_memory(new_hp as u64);
+        self.grow_memory(new_hp);
 
         Value::from_ptr(old_hp)
     }
 
-    unsafe fn grow_memory(&mut self, ptr: u64) {
+    unsafe fn grow_memory(&mut self, ptr: usize) {
         let heap_end = self.heap.as_ptr() as usize + self.heap.len();
         if ptr as usize > heap_end {
             // We don't allow growing memory in tests, allocate large enough for the test
@@ -62,13 +65,29 @@ impl Memory for TestMemory {
     }
 }
 
+#[incremental_gc]
 pub unsafe fn initialize_test_memory() -> TestMemory {
-    let mut memory = TestMemory::new(Bytes(PARTITION_SIZE as u32).to_words());
-    let state = IncrementalGC::initial_gc_state(&mut memory, 0);
+    use motoko_rts::gc::incremental::partitioned_heap::PARTITION_SIZE;
+    use motoko_rts::gc::incremental::{set_incremental_gc_state, IncrementalGC};
+
+    let memory = TestMemory::new(Bytes(PARTITION_SIZE).to_words());
+    let state = IncrementalGC::<TestMemory>::initial_gc_state(0);
     set_incremental_gc_state(Some(state));
     memory
 }
 
+#[non_incremental_gc]
+pub unsafe fn initialize_test_memory() -> TestMemory {
+    const TEST_MEMORY_SIZE: usize = 32 * 1024 * 1024;
+    TestMemory::new(Bytes(TEST_MEMORY_SIZE).to_words())
+}
+
+#[incremental_gc]
 pub unsafe fn reset_test_memory() {
+    use motoko_rts::gc::incremental::set_incremental_gc_state;
+
     set_incremental_gc_state(None);
 }
+
+#[non_incremental_gc]
+pub unsafe fn reset_test_memory() {}
diff --git a/rts/motoko-rts-tests/src/print.rs b/rts/motoko-rts-tests/src/print.rs
new file mode 100644
index 00000000000..05464001911
--- /dev/null
+++ b/rts/motoko-rts-tests/src/print.rs
@@ -0,0 +1,79 @@
+//! Support for standard output in wasmtime.
+
+#[link(wasm_import_module = "wasi_snapshot_preview1")]
+extern "C" {
+    fn fd_write(fd: i32, iovs: i32, iovs_len: i32, nwritten: i32) -> i32;
+}
+
+#[repr(C)]
+struct iov {
+    base: i32,
+    length: i32,
+}
+
+const BUFFER_LENGTH: usize = 1024;
+
+static mut WRITTEN: i32 = 0;
+static mut TEXT_BUFFER: [u8; BUFFER_LENGTH] = [0; BUFFER_LENGTH];
+static mut IO_VECTOR: iov = iov { base: 0, length: 0 };
+
+/// Truncates the text if it does not fit into `BUFFER_LENGTH` bytes (one byte is reserved for the trailing newline).
+pub(crate) fn wasmtime_println(text: &str) {
+    unsafe {
+        let mut length = 0;
+        for byte in text.as_bytes() {
+            TEXT_BUFFER[length] = *byte;
+            length += 1;
+            if length + 1 >= TEXT_BUFFER.len() {
+                break;
+            }
+        }
+        TEXT_BUFFER[length] = '\n' as u8; // a trailing newline is required for the output to actually appear
+        length += 1;
+
+        IO_VECTOR.base = &mut TEXT_BUFFER[0] as *mut u8 as i32;
+        IO_VECTOR.length = length as i32;
+
+        let io_vector_array = &mut IO_VECTOR as *mut iov;
+        const STANDARDD_OUTPUT: i32 = 1;
+        const IO_VECTOR_ARRAY_LENGTH: i32 = 1;
+        let written_pointer = (&mut WRITTEN) as *mut i32;
+
+        fd_write(
+            STANDARDD_OUTPUT,
+            io_vector_array as i32,
+            IO_VECTOR_ARRAY_LENGTH,
+            written_pointer as i32,
+        );
+    }
+}
+
+#[macro_export]
+macro_rules! println {
+    ($($arg:tt)*) => ({
+        {
+            use core::fmt::Write;
+            let mut output = String::new();
+            write!(&mut output, $($arg)*).unwrap();
+            crate::print::wasmtime_println(&output);
+        }
+    })
+}
+
+#[macro_export]
+macro_rules! print {
+    ($($arg:tt)*) => ({
+        println!($($arg)*);
+    })
+}
+
+#[macro_export]
+macro_rules! panic {
+    ($($arg:tt)*) => ({
+        use core::fmt::Write;
+        let mut output = String::from("[PANIC] ");
+        write!(&mut output, $($arg)*).unwrap();
+        crate::print::wasmtime_println(&output);
+        std::process::exit(1);
+    })
+}
diff --git a/rts/motoko-rts-tests/src/stabilization.rs b/rts/motoko-rts-tests/src/stabilization.rs
new file mode 100644
index 00000000000..d50d7bde35e
--- /dev/null
+++ b/rts/motoko-rts-tests/src/stabilization.rs
@@ -0,0 +1,182 @@
+mod layout;
+mod reader_writer;
+mod stable_bigints;
+mod stable_memory;
+
+use crate::{
+    gc::{
+        check_dynamic_heap, heap::MotokoHeap, random::generate, utils::GC, utils::WORD_SIZE,
+        CheckMode, TestHeap,
+    },
+    memory::TestMemory,
+    stabilization::stable_memory::clear_stable_memory,
+};
+use motoko_rts::{
+    memory::{alloc_array, Memory},
+    stabilization::{
+        deserialization::Deserialization, graph_copy::GraphCopy, serialization::Serialization,
+    },
+    types::{Value, Words, TAG_ARRAY_M},
+};
+use oorandom::Rand32;
+
+pub unsafe fn test() {
+    println!("Testing stabilization ...");
+    layout::test();
+    stable_bigints::test();
+    reader_writer::test();
+    test_stabilization();
+    reset_memory();
+}
+
+#[no_mangle]
+pub fn moc_stabilization_instruction_limit() -> u64 {
+    u64::MAX
+}
+
+#[no_mangle]
+pub fn moc_stable_memory_access_limit() -> u64 {
+    u64::MAX
+}
+
+#[no_mangle]
+pub fn ic0_performance_counter(_counter: u32) -> u64 {
+    0
+}
+
+// This is only called for graph copy increment limit testing.
+// Not used during RTS testing.
+#[no_mangle]
+pub fn deserialized_size() -> usize {
+    0
+}
+
+fn reset_gc(heap_base_address: usize) {
+    use motoko_rts::gc::incremental::{set_incremental_gc_state, IncrementalGC};
+
+    unsafe {
+        let state = IncrementalGC::<MotokoHeap>::initial_gc_state(heap_base_address);
+        set_incremental_gc_state(Some(state));
+    }
+}
+
+fn clear_heap(heap: &mut MotokoHeap) {
+    reset_gc(heap.heap_base_address());
+}
+
+fn reset_memory() {
+    clear_stable_memory();
+    reset_main_memory();
+}
+
+fn reset_main_memory() {
+    reset_gc(0);
+}
+
+struct RandomHeap {
+    descriptor: TestHeap,
+    memory: MotokoHeap,
+}
+
+impl RandomHeap {
+    fn clear(&mut self) {
+        clear_heap(&mut self.memory);
+    }
+
+    fn set_new_root(&mut self, stable_root: Value) {
+        self.clear_continuation_table();
+        self.reset_root_array(stable_root);
+        // Set the stable root as sole static root pointer.
+        self.reset_descriptor();
+    }
+
+    fn clear_continuation_table(&mut self) {
+        let table_pointer = self.memory.continuation_table_variable_address() as *mut Value;
+        unsafe {
+            *table_pointer = alloc_array(&mut self.memory, TAG_ARRAY_M, 0);
+        }
+    }
+
+    fn reset_root_array(&mut self, stable_root: Value) {
+        let root_array_pointer = self.memory.static_root_array_variable_address() as *mut Value;
+        unsafe {
+            *root_array_pointer = stable_root;
+        }
+    }
+
+    fn reset_descriptor(&mut self) {
+        self.descriptor.continuation_table.clear();
+    }
+
+    fn old_stable_root(&self) -> Value {
+        let root_array_pointer = self.memory.static_root_array_variable_address() as *mut Value;
+        unsafe { *root_array_pointer }
+    }
+
+    fn check_heap(&self) {
+        check_dynamic_heap(
+            CheckMode::Stabilzation,
+            &self.descriptor.heap,
+            &self.descriptor.roots,
+            &self.descriptor.continuation_table,
+            &self.memory.heap().as_ref(),
+            self.memory.heap_base_offset(),
+            self.memory.heap_ptr_offset(),
+            self.memory.static_root_array_variable_offset(),
+            self.memory.continuation_table_variable_offset(),
+            self.memory.region0_pointer_variable_offset(),
+        )
+    }
+}
+
+fn random_heap(random: &mut Rand32, max_objects: usize) -> RandomHeap {
+    let descriptor = generate(random.rand_u32() as u64, max_objects);
+    let pointers: usize = descriptor
+        .heap
+        .iter()
+        .map(|(_, references)| references.len() + 1)
+        .sum();
+    let memory = descriptor.build(GC::Incremental, pointers * WORD_SIZE as usize);
+    RandomHeap { descriptor, memory }
+}
+
+fn test_stabilization() {
+    println!("  Testing serialization and deserialization ...");
+    const RANDOM_SEED: u64 = 4711;
+    let mut random = Rand32::new(RANDOM_SEED);
+    test_serialization_deserialization(&mut random, 100, 0);
+    test_serialization_deserialization(&mut random, 1000, 200);
+    test_serialization_deserialization(&mut random, 10_000, 5_000);
+    test_serialization_deserialization(&mut random, 20_000, 7_000);
+}
+
+fn test_serialization_deserialization(random: &mut Rand32, max_objects: usize, stable_start: u64) {
+    println!("    Testing with {max_objects} objects");
+    clear_stable_memory();
+    let mut heap = random_heap(random, max_objects);
+    let old_stable_root = heap.old_stable_root();
+
+    let stable_size = serialize(old_stable_root, stable_start);
+
+    heap.clear();
+
+    let stable_root = deserialize(&mut heap.memory, stable_start, stable_size);
+
+    heap.set_new_root(stable_root);
+    heap.check_heap();
+}
+
+fn serialize(old_stable_root: Value, stable_start: u64) -> u64 {
+    let mut memory = TestMemory::new(Words(0));
+    let mut serialization = Serialization::start(&mut memory, old_stable_root, stable_start);
+    serialization.copy_increment(&mut memory);
+    assert!(serialization.is_completed());
+    serialization.serialized_data_length()
+}
+
+fn deserialize<M: Memory>(mem: &mut M, stable_start: u64, stable_size: u64) -> Value {
+    let mut deserialization = Deserialization::start(mem, stable_start, stable_size);
+    deserialization.copy_increment(mem);
+    assert!(deserialization.is_completed());
+    deserialization.get_stable_root()
+}
diff --git a/rts/motoko-rts-tests/src/stabilization/layout.rs b/rts/motoko-rts-tests/src/stabilization/layout.rs
new file mode 100644
index 00000000000..ccfd59cdbfd
--- /dev/null
+++ b/rts/motoko-rts-tests/src/stabilization/layout.rs
@@ -0,0 +1,32 @@
+use motoko_rts::stabilization::layout::StableObjectKind;
+
+pub unsafe fn test() {
+    println!("  Testing layout ...");
+
+    test_stable_tags();
+}
+
+fn test_stable_tags() {
+    for object_kind in [
+        StableObjectKind::ArrayImmutable,
+        StableObjectKind::ArrayMutable,
+        StableObjectKind::ArrayTuple,
+        StableObjectKind::ArraySharedFunction,
+        StableObjectKind::MutBox,
+        StableObjectKind::Object,
+        StableObjectKind::BlobBytes,
+        StableObjectKind::BlobText,
+        StableObjectKind::BlobPrincipal,
+        StableObjectKind::BlobActor,
+        StableObjectKind::Bits64Unsigned,
+        StableObjectKind::Bits64Signed,
+        StableObjectKind::Bits64Float,
+        StableObjectKind::Region,
+        StableObjectKind::Variant,
+        StableObjectKind::Concat,
+        StableObjectKind::BigInt,
+        StableObjectKind::Some,
+    ] {
+        assert!(object_kind.encode().decode() == object_kind);
+    }
+}
diff --git a/rts/motoko-rts-tests/src/stabilization/reader_writer.rs b/rts/motoko-rts-tests/src/stabilization/reader_writer.rs
new file mode 100644
index 00000000000..88de656f316
--- /dev/null
+++ b/rts/motoko-rts-tests/src/stabilization/reader_writer.rs
@@ -0,0 +1,378 @@
+use std::{array::from_fn, mem::size_of};
+
+use motoko_rts::stabilization::serialization::stable_memory_stream::{
+    ScanStream, StableMemoryStream, WriteStream,
+};
+use oorandom::Rand32;
+
+use crate::stabilization::stable_memory::ic0_stable64_read;
+
+pub unsafe fn test() {
+    println!("  Testing stable memory stream ...");
+    test_empy_reader_writer();
+    test_single_read_write();
+    test_single_update();
+    test_multiple_read_write();
+    test_multiple_updates();
+    test_skip_all();
+    test_interleaved_read_write();
+    test_interleaved_skip();
+    test_bulk_read_write();
+    test_raw_read_write();
+    test_randomized_read_write();
+}
+
+fn test_empy_reader_writer() {
+    println!("    Testing empty stream ...");
+    let mut reader_writer = StableMemoryStream::open(0);
+    assert!(reader_writer.scan_completed());
+    reader_writer.close();
+    assert_eq!(reader_writer.written_length(), 0);
+}
+
+fn test_single_read_write() {
+    println!("    Testing single read write ...");
+    let mut reader_writer = StableMemoryStream::open(0);
+    const NUMBER: u64 = 1234567890;
+    reader_writer.write(&NUMBER);
+    let result = reader_writer.read::<u64>();
+    assert_eq!(result, NUMBER);
+    assert!(reader_writer.scan_completed());
+    reader_writer.close();
+    assert_eq!(reader_writer.written_length(), size_of::<u64>() as u64);
+}
+
+fn test_single_update() {
+    println!("    Testing single update ...");
+    let mut reader_writer = StableMemoryStream::open(0);
+    const NUMBER: u64 = 1234567890;
+    reader_writer.write(&NUMBER);
+    let result = reader_writer.read::<u64>();
+    assert_eq!(result, NUMBER);
+    assert!(reader_writer.scan_completed());
+    const NEW_NUMBER: u64 = 321321321;
+    reader_writer.update(&NEW_NUMBER);
+    assert!(reader_writer.scan_completed());
+    let mut test_value = 0u64;
+    reader_writer.close();
+    assert_eq!(reader_writer.written_length(), size_of::<u64>() as u64);
+    ic0_stable64_read(
+        &mut test_value as *mut u64 as u64,
+        0,
+        size_of::<u64>() as u64,
+    );
+    assert_eq!(test_value, NEW_NUMBER);
+}
+
+fn test_multiple_read_write() {
+    println!("    Testing multiple read write ...");
+    let mut reader_writer = StableMemoryStream::open(0);
+    const AMOUNT: usize = 100_000;
+    for number in 0..AMOUNT {
+        reader_writer.write(&number);
+    }
+    for number in 0..AMOUNT {
+        assert!(!reader_writer.scan_completed());
+        let output = reader_writer.read::<usize>();
+        assert_eq!(output, number);
+    }
+    assert!(reader_writer.scan_completed());
+    reader_writer.close();
+    assert_eq!(
+        reader_writer.written_length(),
+        (AMOUNT * size_of::<usize>()) as u64
+    );
+}
+
+fn test_multiple_updates() {
+    println!("    Testing multiple read write ...");
+    let mut reader_writer = StableMemoryStream::open(0);
+    const AMOUNT: usize = 100_000;
+    const TOTAL_LENGTH: u64 = (AMOUNT * size_of::<usize>()) as u64;
+    for number in 0..AMOUNT {
+        reader_writer.write(&number);
+    }
+    for number in 0..AMOUNT {
+        assert!(!reader_writer.scan_completed());
+        let output = reader_writer.read::<usize>();
+        assert_eq!(output, number);
+        reader_writer.update(&(number * 2));
+    }
+    assert!(reader_writer.scan_completed());
+    reader_writer.close();
+    assert_eq!(reader_writer.written_length(), TOTAL_LENGTH);
+    let mut test_data = [0usize; AMOUNT];
+    ic0_stable64_read(&mut test_data[0] as *mut usize as u64, 0, TOTAL_LENGTH);
+    for index in 0..AMOUNT {
+        assert_eq!(test_data[index], index * 2)
+    }
+}
+
+fn test_skip_all() {
+    println!("    Testing skip all ...");
+    let mut reader_writer = StableMemoryStream::open(0);
+    const AMOUNT: usize = 100_000;
+    let total_size = AMOUNT * size_of::<usize>();
+    for number in 0..AMOUNT {
+        reader_writer.write(&number);
+    }
+    assert!(!reader_writer.scan_completed());
+    reader_writer.skip(total_size);
+    assert!(reader_writer.scan_completed());
+    reader_writer.close();
+    assert_eq!(reader_writer.written_length(), total_size as u64);
+}
+
+fn test_interleaved_read_write() {
+    println!("    Testing interleaved read write ...");
+    let mut reader_writer = StableMemoryStream::open(0);
+    const AMOUNT: usize = 100_000;
+    for counter in 0..AMOUNT {
+        let input = (counter, counter * 2, counter * 3);
+        reader_writer.write(&input);
+        assert!(!reader_writer.scan_completed());
+        let output = reader_writer.read::<(usize, usize, usize)>();
+        assert_eq!(output, input);
+        assert!(reader_writer.scan_completed());
+    }
+    reader_writer.close();
+    assert_eq!(
+        reader_writer.written_length(),
+        (AMOUNT * size_of::<(usize, usize, usize)>()) as u64
+    );
+}
+
+fn test_interleaved_skip() {
+    println!("    Testing interleaved read skip ...");
+    let mut reader_writer = StableMemoryStream::open(0);
+    let value_size = size_of::<(usize, usize, usize)>();
+    const AMOUNT: usize = 100_000;
+    for counter in 0..AMOUNT {
+        reader_writer.write(&(counter, counter * 2, counter * 3));
+        assert!(!reader_writer.scan_completed());
+    }
+    for counter in 0..AMOUNT {
+        if counter % 2 == 0 {
+            reader_writer.skip(value_size)
+        } else {
+            let output = reader_writer.read::<(usize, usize, usize)>();
+            assert_eq!(output, (counter, counter * 2, counter * 3));
+        }
+    }
+    reader_writer.close();
+    assert_eq!(reader_writer.written_length(), (AMOUNT * value_size) as u64);
+}
+
+fn test_bulk_read_write() {
+    println!("    Testing bulk read write ...");
+    const LENGTH: usize = 99_999;
+    let input: [u8; LENGTH] = from_fn(|index| index as u8);
+    let mut reader_writer = StableMemoryStream::open(0);
+    reader_writer.write(&input);
+    assert!(!reader_writer.scan_completed());
+    let mut output = [0u8; LENGTH];
+    reader_writer.raw_read(&mut output as *mut u8 as usize, LENGTH);
+    assert_eq!(input, output);
+    assert!(reader_writer.scan_completed());
+    reader_writer.close();
+    assert_eq!(reader_writer.written_length(), LENGTH as u64);
+}
+
+fn test_raw_read_write() {
+    println!("    Testing raw read write ...");
+    const LENGTH: usize = 99;
+    let mut reader_writer = StableMemoryStream::open(0);
+    const AMOUNT: usize = 100;
+    for counter in 0..AMOUNT {
+        let input: [u8; LENGTH] = from_fn(|index| (counter + index) as u8);
+        reader_writer.raw_write(&input[0] as *const u8 as usize, LENGTH);
+        assert!(!reader_writer.scan_completed());
+    }
+    for counter in 0..AMOUNT {
+        let mut output = [0u8; LENGTH];
+        reader_writer.raw_read(&mut output[0] as *mut u8 as usize, LENGTH);
+        assert_eq!(from_fn(|index| (counter + index) as u8), output);
+    }
+    assert!(reader_writer.scan_completed());
+    reader_writer.close();
+    assert_eq!(reader_writer.written_length(), (AMOUNT * LENGTH) as u64);
+}
+
+#[repr(C)]
+#[derive(Debug, PartialEq, Default)]
+struct RandomRecord {
+    field_0: i8,
+    padding_0: i8,
+    field_1: i16,
+    field_2: i32,
+    field_4: i64,
+    field_5: i128,
+    field_6: f64,
+    padding_6: i64,
+}
+
+type RandomTuple = (f64, i64);
+type RandomArray = [i64; 100];
+type RandomBulkData = [u8; 10_000];
+
+#[derive(Debug, PartialEq)]
+enum RandomValue {
+    SingleByte(u8),
+    SimpleNumber(usize),
+    LargeNumber(u128),
+    TupleValue(RandomTuple),
+    RecordValue(RandomRecord),
+    ArrayValue(RandomArray),
+    BulkValue(RandomBulkData),
+}
+
+impl RandomValue {
+    fn generate(random: &mut Rand32) -> RandomValue {
+        match random.rand_u32() % 7 {
+            0 => RandomValue::SingleByte(random.rand_u32() as u8),
+            1 => RandomValue::SimpleNumber(random.rand_u32() as usize),
+            2 => RandomValue::LargeNumber(
+                random.rand_u32() as u128
+                    * random.rand_u32() as u128
+                    * random.rand_u32() as u128
+                    * random.rand_u32() as u128,
+            ),
+            3 => RandomValue::TupleValue((
+                random.rand_float() as f64,
+                random.rand_i32() as i64 * random.rand_i32() as i64,
+            )),
+            4 => RandomValue::RecordValue(RandomRecord {
+                field_0: random.rand_i32() as i8,
+                padding_0: 0,
+                field_1: random.rand_i32() as i16,
+                field_2: random.rand_i32(),
+                field_4: random.rand_i32() as i64,
+                field_5: random.rand_i32() as i128,
+                field_6: random.rand_float() as f64,
+                padding_6: 0,
+            }),
+            5 => RandomValue::ArrayValue(from_fn(|_| random.rand_i32() as i64)),
+            6 => RandomValue::BulkValue(from_fn(|_| random.rand_u32() as u8)),
+            _ => unreachable!(),
+        }
+    }
+
+    fn size(&self) -> usize {
+        match self {
+            RandomValue::SingleByte(_) => size_of::<u8>(),
+            RandomValue::SimpleNumber(_) => size_of::<usize>(),
+            RandomValue::LargeNumber(_) => size_of::<u128>(),
+            RandomValue::TupleValue(_) => size_of::<RandomTuple>(),
+            RandomValue::RecordValue(_) => size_of::<RandomRecord>(),
+            RandomValue::ArrayValue(_) => size_of::<RandomArray>(),
+            RandomValue::BulkValue(_) => size_of::<RandomBulkData>(),
+        }
+    }
+
+    fn write(&self, reader_writer: &mut StableMemoryStream) {
+        match self {
+            RandomValue::SingleByte(value) => reader_writer.write(value),
+            RandomValue::SimpleNumber(value) => reader_writer.write(value),
+            RandomValue::LargeNumber(value) => reader_writer.write(value),
+            RandomValue::TupleValue(value) => reader_writer.write(value),
+            RandomValue::RecordValue(value) => reader_writer.write(value),
+            RandomValue::ArrayValue(value) => reader_writer.write(value),
+            RandomValue::BulkValue(value) => reader_writer.write(value),
+        }
+    }
+
+    fn empty_clone(&self) -> RandomValue {
+        match self {
+            RandomValue::SingleByte(_) => RandomValue::SingleByte(0),
+            RandomValue::SimpleNumber(_) => RandomValue::SimpleNumber(0),
+            RandomValue::LargeNumber(_) => RandomValue::LargeNumber(0),
+            RandomValue::TupleValue(_) => RandomValue::TupleValue((0.0, 0)),
+            RandomValue::RecordValue(_) => RandomValue::RecordValue(RandomRecord {
+                field_0: 0,
+                padding_0: 0,
+                field_1: 0,
+                field_2: 0,
+                field_4: 0,
+                field_5: 0,
+                field_6: 0.0,
+                padding_6: 0,
+            }),
+            RandomValue::ArrayValue(_) => RandomValue::ArrayValue(from_fn(|_| 0)),
+            RandomValue::BulkValue(_) => RandomValue::BulkValue(from_fn(|_| 0)),
+        }
+    }
+
+    fn read(&mut self, reader_writer: &mut StableMemoryStream) {
+        match self {
+            RandomValue::SingleByte(value) => *value = reader_writer.read(),
+            RandomValue::SimpleNumber(value) => *value = reader_writer.read(),
+            RandomValue::LargeNumber(value) => *value = reader_writer.read(),
+            RandomValue::TupleValue(value) => *value = reader_writer.read(),
+            RandomValue::RecordValue(value) => *value = reader_writer.read(),
+            RandomValue::ArrayValue(value) => {
+                reader_writer.raw_read(value as *mut i64 as usize, size_of::<RandomArray>())
+            }
+            RandomValue::BulkValue(value) => {
+                reader_writer.raw_read(value as *mut u8 as usize, size_of::<RandomBulkData>())
+            }
+        }
+    }
+
+    fn update(&self, reader_writer: &mut StableMemoryStream) {
+        match self {
+            RandomValue::SingleByte(value) => reader_writer.update(value),
+            RandomValue::SimpleNumber(value) => reader_writer.update(value),
+            RandomValue::LargeNumber(value) => reader_writer.update(value),
+            RandomValue::TupleValue(value) => reader_writer.update(value),
+            RandomValue::RecordValue(value) => reader_writer.update(value),
+            RandomValue::ArrayValue(value) => reader_writer.update(value),
+            RandomValue::BulkValue(value) => reader_writer.update(value),
+        }
+    }
+}
+
+fn test_randomized_read_write() {
+    println!("    Testing randomized read write ...");
+    const RANDOM_SEED: u64 = 4711;
+    let mut random = Rand32::new(RANDOM_SEED);
+    let mut series = vec![];
+    let stable_start = random.rand_range(0..1000) as u64;
+    let mut reader_writer = StableMemoryStream::open(stable_start);
+    let mut total_size = 0;
+    const AMOUNT: usize = 1000;
+    for _ in 0..AMOUNT {
+        let input = RandomValue::generate(&mut random);
+        input.write(&mut reader_writer);
+        total_size += input.size();
+        series.push(input);
+        assert!(!reader_writer.scan_completed());
+        if random.rand_u32() % 2 == 0 {
+            let expected = series.remove(0);
+            let mut output = expected.empty_clone();
+            output.read(&mut reader_writer);
+            assert_eq!(output, expected);
+            let empty = output.empty_clone();
+            empty.update(&mut reader_writer);
+        }
+    }
+    while !reader_writer.scan_completed() {
+        let expected = series.remove(0);
+        let mut output = expected.empty_clone();
+        output.read(&mut reader_writer);
+        assert_eq!(output, expected);
+        let empty = output.empty_clone();
+        empty.update(&mut reader_writer);
+    }
+    assert!(reader_writer.scan_completed());
+    reader_writer.close();
+    assert_eq!(reader_writer.written_length(), total_size as u64);
+    check_zeroed_stable_memory(stable_start, total_size);
+}
+
+fn check_zeroed_stable_memory(stable_start: u64, size: usize) {
+    for index in 0..size {
+        let mut data = 0u8;
+        ic0_stable64_read(&mut data as *mut u8 as u64, stable_start + index as u64, 1);
+        assert_eq!(data, 0);
+    }
+}
diff --git a/rts/motoko-rts-tests/src/stabilization/stable_bigints.rs b/rts/motoko-rts-tests/src/stabilization/stable_bigints.rs
new file mode 100644
index 00000000000..7be0f0e3be9
--- /dev/null
+++ b/rts/motoko-rts-tests/src/stabilization/stable_bigints.rs
@@ -0,0 +1,85 @@
+use std::ptr::null_mut;
+
+use motoko_rts::{
+    bigint::{bigint_add, bigint_eq, bigint_mul, bigint_neg, bigint_of_word64},
+    types::Value,
+};
+use oorandom::Rand32;
+
+use crate::{
+    bigint::set_bigint_heap,
+    memory::{initialize_test_memory, reset_test_memory},
+    stabilization::{deserialize, serialize, stable_memory::clear_stable_memory},
+};
+
+pub unsafe fn test() {
+    println!("  Testing stable big integers ...");
+
+    const RANDOM_SEED: u64 = 4711;
+    let mut random = Rand32::new(RANDOM_SEED);
+
+    test_simple_numbers();
+    test_small_random_numbers(&mut random);
+    test_big_random_numbers(&mut random);
+
+    clear_stable_memory();
+}
+
+unsafe fn test_simple_numbers() {
+    println!("    Testing simple numbers ...");
+
+    for number in 0..256 {
+        test_bigint(|| bigint_of_word64(number));
+    }
+}
+
+unsafe fn test_small_random_numbers(random: &mut Rand32) {
+    println!("    Testing small random numbers ...");
+
+    const TEST_RUNS: u32 = 1000;
+    for _ in 0..TEST_RUNS {
+        let number = random.rand_u32() as u64;
+        test_bigint(|| bigint_of_word64(number));
+    }
+}
+
+unsafe fn test_big_random_numbers(random: &mut Rand32) {
+    println!("    Testing big random numbers ...");
+
+    const TEST_RUNS: u32 = 10_000;
+    for _ in 0..TEST_RUNS {
+        test_bigint(|| random_bigint(random));
+    }
+}
+
+unsafe fn random_bigint(random: &mut Rand32) -> Value {
+    const STEPS: usize = 20;
+    let mut last = bigint_of_word64(random.rand_u32() as u64);
+    let mut current = bigint_of_word64(random.rand_u32() as u64);
+    for _ in 0..STEPS {
+        let computed = match random.rand_range(0..3) {
+            0 => bigint_add(last, current),
+            1 => bigint_mul(last, current),
+            2 => bigint_neg(current),
+            _ => unreachable!(),
+        };
+        last = current;
+        current = computed;
+    }
+    current
+}
+
+unsafe fn test_bigint<F: Fn() -> Value>(mut generate_bigint: F) {
+    let mut memory = initialize_test_memory();
+    set_bigint_heap(&mut memory);
+    let input = generate_bigint();
+    // Clone the input bigint object, because it is destructed on serialization.
+    let clone = bigint_add(input, bigint_of_word64(0));
+    assert!(bigint_eq(clone, input));
+    let stable_size = serialize(clone, 0);
+    // Note: `clone` is no longer a valid bigint because it has been replaced by a forwarding object.
+    let output = deserialize(&mut memory, 0, stable_size);
+    assert!(bigint_eq(output, input));
+    set_bigint_heap(null_mut());
+    reset_test_memory();
+}
diff --git a/rts/motoko-rts-tests/src/stabilization/stable_memory.rs b/rts/motoko-rts-tests/src/stabilization/stable_memory.rs
new file mode 100644
index 00000000000..a734a9e7c89
--- /dev/null
+++ b/rts/motoko-rts-tests/src/stabilization/stable_memory.rs
@@ -0,0 +1,49 @@
+use motoko_rts::{mem_utils::memcpy_bytes, types::Bytes};
+use std::cell::RefCell;
+
+const PAGE_SIZE: u64 = 64 * 1024;
+
+thread_local! {
+    static STABLE_MEMORY: RefCell<Vec<u8>> = RefCell::new(vec![]);
+}
+
+pub fn clear_stable_memory() {
+    STABLE_MEMORY.with(|memory| {
+        memory.borrow_mut().clear();
+    })
+}
+
+#[no_mangle]
+pub fn ic0_stable64_write(offset: u64, source: u64, size: u64) {
+    STABLE_MEMORY.with(|memory| {
+        assert!(offset + size <= memory.borrow().len() as u64);
+        let destination = memory.borrow_mut().as_mut_ptr() as u64 + offset;
+        unsafe {
+            memcpy_bytes(destination as usize, source as usize, Bytes(size as usize));
+        }
+    });
+}
+
+#[no_mangle]
+pub fn ic0_stable64_read(destination: u64, offset: u64, size: u64) {
+    STABLE_MEMORY.with(|memory| {
+        assert!(offset + size <= memory.borrow().len() as u64);
+        let source = memory.borrow_mut().as_mut_ptr() as u64 + offset;
+        unsafe {
+            memcpy_bytes(destination as usize, source as usize, Bytes(size as usize));
+        }
+    });
+}
+
+#[no_mangle]
+pub fn ic0_stable64_size() -> u64 {
+    STABLE_MEMORY.with(|memory| memory.borrow().len()) as u64 / PAGE_SIZE
+}
+
+#[no_mangle]
+pub fn ic0_stable64_grow(additional_pages: u64) -> u64 {
+    for _ in 0..additional_pages * PAGE_SIZE {
+        STABLE_MEMORY.with(|memory| memory.borrow_mut().push(0));
+    }
+    additional_pages
+}
diff --git a/rts/motoko-rts-tests/src/stable_mem.rs b/rts/motoko-rts-tests/src/stable_mem.rs
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/rts/motoko-rts-tests/src/text.rs b/rts/motoko-rts-tests/src/text.rs
index 1d534f4df85..218d265a5d2 100644
--- a/rts/motoko-rts-tests/src/text.rs
+++ b/rts/motoko-rts-tests/src/text.rs
@@ -1,6 +1,6 @@
 //! Text and text iterator tests
 
-use crate::memory::{initialize_test_memory, reset_test_memory, TestMemory};
+use crate::memory::{initialize_test_memory, reset_test_memory};
 
 use motoko_rts::memory::Memory;
 use motoko_rts::text::{
@@ -8,12 +8,10 @@ use motoko_rts::text::{
     text_singleton, text_size,
 };
 use motoko_rts::text_iter::{text_iter, text_iter_done, text_iter_next};
-use motoko_rts::types::{Bytes, Value, Words, TAG_BLOB_T};
+use motoko_rts::types::{Bytes, Value, TAG_BLOB_T};
 
 use std::convert::TryFrom;
 
-use proptest::test_runner::{Config, TestCaseError, TestCaseResult, TestRunner};
-
 static STR: &str = "abcdefgh";
 
 struct TextIter<'a, M: Memory> {
@@ -55,9 +53,9 @@ pub unsafe fn test() {
         let char = char::try_from(i).unwrap();
         let mut str = String::new();
         str.push(char);
-        let mut out: u32 = 0;
+        let mut out: usize = 0;
         let char_decoded = decode_code_point(str.as_ptr(), &mut out as *mut _);
-        assert_eq!(out, str.len() as u32);
+        assert_eq!(out, str.len());
         assert_eq!(char::try_from(char_decoded).unwrap(), char);
 
         let text = text_singleton(&mut mem, char as u32);
@@ -76,23 +74,32 @@ pub unsafe fn test() {
     println!("  Testing concatenation");
     concat1(&mut mem);
 
-    drop(mem);
+    let mut long_text = String::from("");
+    for i in 0..=255u32 {
+        long_text.push(char::try_from(i).unwrap());
+    }
+    concat_test(&mut mem, vec![String::from("")]);
+    concat_test(&mut mem, vec![String::from(""), String::from("")]);
+    concat_test(&mut mem, vec![String::from("a"), String::from("")]);
+    concat_test(&mut mem, vec![String::from(""), String::from("b")]);
+    concat_test(&mut mem, vec![String::from("a"), String::from("b")]);
+    concat_test(
+        &mut mem,
+        vec![String::from(""), String::from(""), String::from("")],
+    );
+    concat_test(
+        &mut mem,
+        vec![
+            String::from("test-"),
+            String::from("abc"),
+            String::from("-0123"),
+            String::from("_!?"),
+            String::from("äöü"),
+            long_text,
+        ],
+    );
 
-    let mut proptest_runner = TestRunner::new(Config {
-        cases: 1_000,
-        failure_persistence: None,
-        ..Default::default()
-    });
-
-    proptest_runner
-        .run(
-            &proptest::collection::vec(proptest::string::string_regex(".{0, 20}").unwrap(), 1..20),
-            |strs| {
-                let mut mem = TestMemory::new(Words(1024 * 1024));
-                concat_prop(&mut mem, strs)
-            },
-        )
-        .unwrap();
+    drop(mem);
 
     reset_test_memory();
 }
@@ -110,19 +117,19 @@ unsafe fn concat1(mem: &mut M) {
     let expected = strs.concat();
 
     // Check number of characters
-    assert_eq!(text_len(obj), expected.chars().count() as u32);
+    assert_eq!(text_len(obj), expected.chars().count());
 
     // Check text size in bytes
-    assert_eq!(text_size(obj), Bytes(expected.len() as u32));
+    assert_eq!(text_size(obj), Bytes(expected.len()));
 
     // Generate blob
     let text_blob = blob_of_text(mem, obj);
 
     // Check number of characters in blob
-    assert_eq!(text_len(text_blob), expected.chars().count() as u32);
+    assert_eq!(text_len(text_blob), expected.chars().count());
 
     // Check blob size in bytes
-    assert_eq!(text_size(text_blob), Bytes(expected.len() as u32));
+    assert_eq!(text_size(text_blob), Bytes(expected.len()));
 
     // Check blob iteration
     let blob = blob_of_text(mem, obj);
@@ -135,7 +142,7 @@ unsafe fn concat1(mem: &mut M) {
     assert_eq!(TextIter::from_text(mem, obj).collect::<String>(), expected);
 }
 
-fn concat_prop<M: Memory>(mem: &mut M, strs: Vec<String>) -> TestCaseResult {
+fn concat_test<M: Memory>(mem: &mut M, strs: Vec<String>) {
     unsafe {
         let mut obj = text_of_str(mem, "");
         for str in &strs {
@@ -146,44 +153,28 @@ fn concat_prop(mem: &mut M, strs: Vec) -> TestCaseResult {
         let expected = strs.concat();
 
         // Check number of characters
-        if text_len(obj) != expected.chars().count() as u32 {
-            return Err(TestCaseError::Fail("text_len".into()));
-        }
+        assert_eq!(text_len(obj), expected.chars().count());
 
         // Check text size in bytes
-        if text_size(obj) != Bytes(expected.len() as u32) {
-            return Err(TestCaseError::Fail("text_size".into()));
-        }
+        assert_eq!(text_size(obj), Bytes(expected.len()));
 
         // Generate blob
         let text_blob = blob_of_text(mem, obj);
 
         // Check number of characters in blob
-        if text_len(text_blob) != expected.chars().count() as u32 {
-            return Err(TestCaseError::Fail("blob text_len".into()));
-        }
+        assert_eq!(text_len(text_blob), expected.chars().count());
 
         // Check blob size in bytes
-        if text_size(text_blob) != Bytes(expected.len() as u32) {
-            return Err(TestCaseError::Fail("blob text_size".into()));
-        }
+        assert_eq!(text_size(text_blob), Bytes(expected.len()));
 
         // Check blob iteration
         let blob = blob_of_text(mem, obj);
-        if TextIter::from_text(mem, blob).collect::<String>() != expected {
-            return Err(TestCaseError::Fail("blob_of_text iteration".into()));
-        }
+        assert_eq!(TextIter::from_text(mem, blob).collect::<String>(), expected);
 
         // Check blob-concat comparison
-        if text_compare(text_blob, obj) != 0 {
-            return Err(TestCaseError::Fail("text_compare of blob and text".into()));
-        }
+        assert_eq!(text_compare(text_blob, obj), 0);
 
         // Check concat iteration
-        if TextIter::from_text(mem, obj).collect::<String>() != expected {
-            return Err(TestCaseError::Fail("iteration".into()));
-        }
-
-        Ok(())
+        assert_eq!(TextIter::from_text(mem, obj).collect::<String>(), expected);
     }
 }
diff --git a/rts/motoko-rts-tests/src/utf8.rs b/rts/motoko-rts-tests/src/utf8.rs
index 209bedce4b4..5c3b6e4dc9e 100644
--- a/rts/motoko-rts-tests/src/utf8.rs
+++ b/rts/motoko-rts-tests/src/utf8.rs
@@ -56,16 +56,10 @@ pub unsafe fn test() {
     println!("Testing UTF8 validation ...");
 
     for test_str in TEST_STRS_VALID.iter() {
-        assert!(utf8_valid(
-            test_str.as_ptr() as *const _,
-            test_str.len() as u32
-        ));
+        assert!(utf8_valid(test_str.as_ptr() as *const _, test_str.len()));
     }
 
     for test_str in TEST_STRS_INVALID.iter() {
-        assert!(!utf8_valid(
-            test_str.as_ptr() as *const _,
-            test_str.len() as u32
-        ));
+        assert!(!utf8_valid(test_str.as_ptr() as *const _, test_str.len()));
     }
 }
diff --git a/rts/motoko-rts/.vim/coc-settings.json b/rts/motoko-rts/.vim/coc-settings.json
index 36bd8a25530..40b2134566b 100644
--- a/rts/motoko-rts/.vim/coc-settings.json
+++ b/rts/motoko-rts/.vim/coc-settings.json
@@ -1,6 +1,6 @@
 // https://github.com/rust-analyzer/rust-analyzer/blob/master/editors/code/package.json
 {
-    "rust-analyzer.cargo.target": "wasm32-unknown-emscripten",
+    "rust-analyzer.cargo.target": "wasm64-unknown-unknown",
 
     // This is required as `cargo check --all-targets` doesn't seem to work well
     // on no-std crates, it generates false "duplicate lang item" errors.
diff --git a/rts/motoko-rts/Cargo.lock b/rts/motoko-rts/Cargo.lock
index 38910e441b2..bd9a8511232 100644
--- a/rts/motoko-rts/Cargo.lock
+++ b/rts/motoko-rts/Cargo.lock
@@ -4,9 +4,9 @@ version = 3
 
 [[package]]
 name = "libc"
-version = "0.2.142"
+version = "0.2.153"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6a987beff54b60ffa6d51982e1aa1146bc42f19bd26be28b0586f252fccf5317"
+checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"
 
 [[package]]
 name = "motoko-rts"
@@ -27,18 +27,18 @@ dependencies = [
 
 [[package]]
 name = "proc-macro2"
-version = "1.0.56"
+version = "1.0.66"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435"
+checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9"
 dependencies = [
  "unicode-ident",
 ]
 
 [[package]]
 name = "quote"
-version = "1.0.26"
+version = "1.0.31"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc"
+checksum = "5fe8a65d69dd0808184ebb5f836ab526bb259db23c657efa38711b1072ee47f0"
 dependencies = [
  "proc-macro2",
 ]
@@ -56,6 +56,6 @@ dependencies = [
 
 [[package]]
 name = "unicode-ident"
-version = "1.0.8"
+version = "1.0.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4"
+checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c"
diff --git a/rts/motoko-rts/Cargo.toml b/rts/motoko-rts/Cargo.toml
index 59350673ba4..8acedf98c21 100644
--- a/rts/motoko-rts/Cargo.toml
+++ b/rts/motoko-rts/Cargo.toml
@@ -25,8 +25,17 @@ ic = []
 # This feature enables extensive memory sanity checks in the incremental GC.
 memory_check = []
 
+# Incremental GC, using the extended object header containing the forwarding pointer. 
+incremental_gc = []
+
+# Classical persistence, 32-bit, with Candid stabilization
+classical_persistence = ["libc"]
+
+# Enhanced orthogonal persistence, 64-bit, retaining the main memory across upgrades.
+enhanced_orthogonal_persistence = ["incremental_gc"]
+
 [dependencies]
-libc = { version = "0.2.139", default_features = false }
+libc = { version = "0.2.139", default_features = false, optional = true }
 motoko-rts-macros = { path = "../motoko-rts-macros" }
 
 [profile.dev]
diff --git a/rts/motoko-rts/native/Cargo.toml b/rts/motoko-rts/native/Cargo.toml
index b78322947c7..690ca564c23 100644
--- a/rts/motoko-rts/native/Cargo.toml
+++ b/rts/motoko-rts/native/Cargo.toml
@@ -8,8 +8,13 @@ edition = "2018"
 crate-type = ["rlib"]
 path = "../src/lib.rs"
 
+[features]
+incremental_gc = []
+classical_persistence = ["libc"]
+enhanced_orthogonal_persistence = ["incremental_gc"]
+
 [dependencies]
-libc = { version = "0.2.112", default_features = false }
+libc = { version = "0.2.112", default_features = false, optional = true }
 motoko-rts-macros = { path = "../../motoko-rts-macros" }
 
 [profile.dev]
diff --git a/rts/motoko-rts/src/allocator.rs b/rts/motoko-rts/src/allocator.rs
index 8111e3f4a52..a0492cd3e79 100644
--- a/rts/motoko-rts/src/allocator.rs
+++ b/rts/motoko-rts/src/allocator.rs
@@ -1,6 +1,7 @@
 // c.f. https://os.phil-opp.com/heap-allocation/#dynamic-memory
 
 use alloc::alloc::{GlobalAlloc, Layout};
+use motoko_rts_macros::enhanced_orthogonal_persistence;
 //use core::ptr::null_mut;
 use crate::memory::{alloc_blob, ic};
 use crate::types::{Bytes, TAG_BLOB_B};
@@ -20,12 +21,11 @@ unsafe impl GlobalAlloc for EphemeralAllocator {
         let align = layout.align();
         // align is a power of 2
         debug_assert!(align.count_ones() == 1);
-        let word_size = crate::constants::WORD_SIZE as usize;
+        let word_size = crate::constants::WORD_SIZE;
         let min_align = (align + word_size - 1) / word_size * word_size;
         let blob_size = size + min_align - word_size;
-        let blob =
-            alloc_blob::(&mut ic::IcMemory, TAG_BLOB_B, Bytes(blob_size as u32))
-                .as_blob_mut();
+        let blob = alloc_blob::(&mut ic::IcMemory, TAG_BLOB_B, Bytes(blob_size))
+            .as_blob_mut();
         let payload_address = blob.payload_addr() as usize;
         let aligned_address = (payload_address + min_align - 1) / min_align * min_align;
 
@@ -41,3 +41,33 @@ unsafe impl GlobalAlloc for EphemeralAllocator {
 
 #[global_allocator]
 static ALLOCATOR: EphemeralAllocator = EphemeralAllocator;
+
+#[no_mangle]
+#[enhanced_orthogonal_persistence]
+unsafe fn __rust_alloc(size: usize, align: usize) -> *mut u8 {
+    ALLOCATOR.alloc(Layout::from_size_align_unchecked(size, align))
+}
+
+#[no_mangle]
+#[enhanced_orthogonal_persistence]
+unsafe fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize) {
+    ALLOCATOR.dealloc(ptr, Layout::from_size_align_unchecked(size, align));
+}
+
+#[no_mangle]
+#[enhanced_orthogonal_persistence]
+fn __rust_realloc(_ptr: *mut u8, _old_size: usize, _align: usize, _new_size: usize) -> *mut u8 {
+    unimplemented!();
+}
+
+#[no_mangle]
+#[enhanced_orthogonal_persistence]
+fn __rust_alloc_zeroed(_size: usize, _align: usize) -> *mut u8 {
+    unimplemented!();
+}
+
+#[no_mangle]
+#[enhanced_orthogonal_persistence]
+fn __rust_alloc_error_handler(_size: usize, _align: usize) -> ! {
+    panic!("Rust allocation error");
+}
diff --git a/rts/motoko-rts/src/barriers.rs b/rts/motoko-rts/src/barriers.rs
index 7a3e7759070..44f95bec811 100644
--- a/rts/motoko-rts/src/barriers.rs
+++ b/rts/motoko-rts/src/barriers.rs
@@ -1,13 +1,35 @@
+use motoko_rts_macros::{incremental_gc, non_incremental_gc};
+
 use crate::{memory::Memory, types::Value};
 
+#[incremental_gc]
 pub unsafe fn init_with_barrier<M: Memory>(_mem: &mut M, location: *mut Value, value: Value) {
     *location = value.forward_if_possible();
 }
 
+#[non_incremental_gc]
+pub unsafe fn init_with_barrier<M: Memory>(mem: &mut M, location: *mut Value, value: Value) {
+    *location = value;
+    crate::gc::generational::write_barrier::post_write_barrier(mem, location as usize);
+}
+
+#[incremental_gc]
 pub unsafe fn write_with_barrier<M: Memory>(mem: &mut M, location: *mut Value, value: Value) {
     crate::gc::incremental::barriers::write_with_barrier(mem, location, value);
 }
 
+#[non_incremental_gc]
+pub unsafe fn write_with_barrier<M: Memory>(mem: &mut M, location: *mut Value, value: Value) {
+    *location = value;
+    crate::gc::generational::write_barrier::post_write_barrier(mem, location as usize);
+}
+
+#[incremental_gc]
 pub unsafe fn allocation_barrier(new_object: Value) -> Value {
     crate::gc::incremental::barriers::allocation_barrier(new_object)
 }
+
+#[non_incremental_gc]
+pub unsafe fn allocation_barrier(new_object: Value) -> Value {
+    new_object
+}
diff --git a/rts/motoko-rts/src/bigint.rs b/rts/motoko-rts/src/bigint.rs
index fcbb4e6a06b..cc2131a27cf 100644
--- a/rts/motoko-rts/src/bigint.rs
+++ b/rts/motoko-rts/src/bigint.rs
@@ -38,14 +38,38 @@ use crate::memory::Memory;
 use crate::tommath_bindings::*;
 use crate::types::{size_of, BigInt, Bytes, Value, TAG_BIGINT};
 
-use motoko_rts_macros::ic_mem_fn;
+use crate::libc_declarations::c_void;
+
+#[classical_persistence]
+use crate::types::Stream;
+
+use motoko_rts_macros::{classical_persistence, ic_mem_fn};
+
+#[cfg(feature = "ic")]
+use motoko_rts_macros::enhanced_orthogonal_persistence;
 
 // Provided by generated code
+#[cfg(feature = "ic")]
 extern "C" {
-    fn int_from_i32(value: i32) -> Value;
+    #[enhanced_orthogonal_persistence]
+    fn int_from_i64(value: isize) -> Value;
+    #[classical_persistence]
+    fn int_from_i32(value: isize) -> Value;
+}
+
+#[cfg(feature = "ic")]
+#[enhanced_orthogonal_persistence]
+unsafe fn int_from_isize(value: isize) -> Value {
+    int_from_i64(value)
 }
 
-unsafe fn mp_alloc<M: Memory>(mem: &mut M, size: Bytes<u32>) -> *mut u8 {
+#[cfg(feature = "ic")]
+#[classical_persistence]
+unsafe fn int_from_isize(value: isize) -> Value {
+    int_from_i32(value)
+}
+
+unsafe fn mp_alloc<M: Memory>(mem: &mut M, size: Bytes<usize>) -> *mut u8 {
     let ptr = mem.alloc_words(size_of::<BigInt>() + size.to_words());
     // NB. Cannot use as_bigint() here as header is not written yet
     let blob = ptr.get_ptr() as *mut BigInt;
@@ -55,7 +79,9 @@ unsafe fn mp_alloc(mem: &mut M, size: Bytes) -> *mut u8 {
     // libtommath stores the size of the object in alloc as count of mp_digits (u64)
     let size = size.as_usize();
     debug_assert_eq!((size % core::mem::size_of::<mp_digit>()), 0);
-    (*blob).mp_int.alloc = (size / core::mem::size_of::<mp_digit>()) as i32;
+    let count = size / core::mem::size_of::<mp_digit>();
+    assert!(count <= i32::MAX as usize);
+    (*blob).mp_int.alloc = count as i32;
     allocation_barrier(ptr);
     blob.payload_addr() as *mut u8
 }
@@ -65,18 +91,18 @@ pub unsafe fn mp_calloc(
     mem: &mut M,
     n_elems: usize,
     elem_size: Bytes<usize>,
-) -> *mut libc::c_void {
+) -> *mut c_void {
     debug_assert_eq!(elem_size.0, core::mem::size_of::<mp_digit>());
     // Overflow check for the following multiplication
     if n_elems > 1 << 30 {
         bigint_trap();
     }
-    let size = Bytes((n_elems * elem_size.0) as u32);
-    let payload = mp_alloc(mem, size) as *mut u32;
+    let size = Bytes(n_elems * elem_size.0);
+    let payload = mp_alloc(mem, size) as *mut usize;
 
     // NB. alloc_bytes rounds up to words so we do the same here to set the whole buffer
     for i in 0..size.to_words().as_usize() {
-        *payload.add(i as usize) = 0;
+        *payload.add(i) = 0;
     }
 
     payload as *mut _
@@ -85,10 +111,10 @@ pub unsafe fn mp_calloc(
 #[ic_mem_fn]
 pub unsafe fn mp_realloc(
     mem: &mut M,
-    ptr: *mut libc::c_void,
-    old_size: Bytes<u32>,
-    new_size: Bytes<u32>,
-) -> *mut libc::c_void {
+    ptr: *mut c_void,
+    old_size: Bytes<usize>,
+    new_size: Bytes<usize>,
+) -> *mut c_void {
     let bigint = BigInt::from_payload(ptr as *mut mp_digit);
 
     debug_assert_eq!((*bigint).header.tag, TAG_BIGINT);
@@ -108,7 +134,7 @@ pub unsafe fn mp_realloc(
 }
 
 #[no_mangle]
-pub unsafe extern "C" fn mp_free(_ptr: *mut libc::c_void, _size: u32) {}
+pub unsafe extern "C" fn mp_free(_ptr: *mut c_void, _size: usize) {}
 
 /*
 Note on libtommath error handling
@@ -161,14 +187,14 @@ pub(crate) unsafe fn mp_iszero(p: *const mp_int) -> bool {
 }
 
 // Allocates an mp_int on the stack
-unsafe fn tmp_bigint() -> mp_int {
+pub(crate) unsafe fn tmp_bigint() -> mp_int {
     let mut i: mp_int = core::mem::zeroed();
     check(mp_init(&mut i));
     i
 }
 
 // Persists an mp_int from the stack on the heap
-unsafe fn persist_bigint(i: mp_int) -> Value {
+pub(crate) unsafe fn persist_bigint(i: mp_int) -> Value {
     if i.dp == core::ptr::null_mut() {
         panic!("persist_bigint: dp == NULL?");
     }
@@ -181,6 +207,7 @@ unsafe fn persist_bigint(i: mp_int) -> Value {
 }
 
 #[no_mangle]
+#[classical_persistence]
 pub unsafe extern "C" fn bigint_of_word32(w: u32) -> Value {
     let mut i = tmp_bigint();
     mp_set_u32(&mut i, w);
@@ -189,6 +216,7 @@ pub unsafe extern "C" fn bigint_of_word32(w: u32) -> Value {
 
 #[cfg(feature = "ic")]
 #[no_mangle]
+#[classical_persistence]
 unsafe extern "C" fn bigint_of_int32(j: i32) -> Value {
     let mut i = tmp_bigint();
     mp_set_i32(&mut i, j);
@@ -212,14 +240,16 @@ unsafe extern "C" fn bigint_to_word32_trap(p: Value) -> u32 {
     mp_get_u32(mp_int)
 }
 
-// a : BigInt, msg : Blob
+// p : BigInt, msg : Blob
 #[cfg(feature = "ic")]
 #[no_mangle]
 unsafe extern "C" fn bigint_to_word32_trap_with(p: Value, msg: Value) -> u32 {
     let mp_int = p.as_bigint().mp_int_ptr();
 
     if mp_isneg(mp_int) || mp_count_bits(mp_int) > 32 {
-        crate::rts_trap(msg.as_blob().payload_const(), msg.as_blob().len());
+        let length = msg.as_blob().len().as_usize();
+        assert!(length <= u32::MAX as usize);
+        crate::rts_trap(msg.as_blob().payload_const(), length as u32);
     }
 
     mp_get_u32(mp_int)
@@ -243,9 +273,23 @@ unsafe extern "C" fn bigint_to_word64_trap(p: Value) -> u64 {
     mp_get_u64(mp_int)
 }
 
+// p : BigInt, msg : Blob
 #[cfg(feature = "ic")]
 #[no_mangle]
-unsafe extern "C" fn bigint_of_word64(w: u64) -> Value {
+unsafe extern "C" fn bigint_to_word64_trap_with(p: Value, msg: Value) -> u64 {
+    let mp_int = p.as_bigint().mp_int_ptr();
+
+    if mp_isneg(mp_int) || mp_count_bits(mp_int) > 64 {
+        let length = msg.as_blob().len().as_usize();
+        assert!(length <= u32::MAX as usize);
+        crate::rts_trap(msg.as_blob().payload_const(), length as u32);
+    }
+
+    mp_get_u64(mp_int)
+}
+
+#[no_mangle]
+pub unsafe extern "C" fn bigint_of_word64(w: u64) -> Value {
     let mut i = tmp_bigint();
     mp_set_u64(&mut i, w);
     persist_bigint(i)
@@ -262,12 +306,38 @@ unsafe extern "C" fn bigint_of_int64(j: i64) -> Value {
 #[cfg(feature = "ic")]
 #[no_mangle]
 unsafe extern "C" fn bigint_of_float64(j: f64) -> Value {
-    // handle fast path: some numbers (when rounded towards zero by `j as i32`)
-    // may be represented as `Int` without resorting to heap allocation, i.e.
-    // in the range `-1073741824 == 0xc0000000 <= j as i32 <= 0x3fffffff == 1073741823`
-    if j < 1073741824.0 && j > -1073741825.0 {
+    // Fast path: Determine when the integer numbers can be represented as a compact (unboxed) `Int`.
+    let is_compact = match usize::BITS {
+        u64::BITS => {
+            // The integer can be represented in compact 64-bit scalar if it is in the range
+            // `-4611686018427387904 == -2 ** 62 <= j as i64 < 2 ** 62 == 4611686018427387904`, by
+            // considering that the two most significant bits are reserved for the `BigInt` scalar tag.
+            // The closest binary64 float representations in IEEE 754 for the boundaries are:
+            // Lower boundary approximation >= -2 ** 62:
+            //   `f64::from_bits(0xC3CF_FFFF_FFFF_FFFF`) == -4611686018427387400 > -4611686018427387904
+            // Upper boundary approximation < 2 ** 62:
+            //   `f64::from_bits(0x43CF_FFFF_FFFF_FFFF) == 4611686018427387400 < 4611686018427387904.
+            // IEEE754 double precision encoding:
+            // ┌───────────────┬────────────────────────┬─────────────────────────┐
+            // │ sign (bit 63) │ exponent (bits 52..62) |  mantissa (bits 0..51)  |
+            // └───────────────┴────────────────────────┴─────────────────────────┘
+            // The exponent has a bias of 1023:
+            // * Example: Exponent 61 is encoded 0x43C == 1023 + 61.
+            // The mantissa has an implicit extra most significant bit 1.
+            // * Example: Mantissa `F_FFFF_FFFF_FFFF` actually represents `1F_FFFF_FFFF_FFFF`.
+            j >= -4611686018427387400.0f64 && j <= 4611686018427387400.0f64
+        }
+        u32::BITS => {
+            // The integer can be represented in compact 32-bit scalar if it is in the range
+            // `-1073741824 == 0xc0000000 <= j as i32 <= 0x3fffffff == 1073741823`, by
+            // considering that the two most significant bits are reserved for the `BigInt` scalar tag.
+            j < 1073741824.0 && j > -1073741825.0
+        }
+        _ => unreachable!(),
+    };
+    if is_compact {
         // defer to generated code to create compact or boxed Int value
-        return int_from_i32(j as i32);
+        return int_from_isize(j as isize);
     }
     let mut i = tmp_bigint();
     check(mp_set_double(&mut i, j));
@@ -401,19 +471,19 @@ unsafe extern "C" fn bigint_isneg(a: Value) -> bool {
 
 #[cfg(feature = "ic")]
 #[no_mangle]
-unsafe extern "C" fn bigint_lsh(a: Value, b: i32) -> Value {
+unsafe extern "C" fn bigint_lsh(a: Value, b: isize) -> Value {
     let mut i = tmp_bigint();
-    check(mp_mul_2d(a.as_bigint().mp_int_ptr(), b, &mut i));
+    check(mp_mul_2d(a.as_bigint().mp_int_ptr(), b as i32, &mut i));
     persist_bigint(i)
 }
 
 #[cfg(feature = "ic")]
 #[no_mangle]
-unsafe extern "C" fn bigint_rsh(a: Value, b: i32) -> Value {
+unsafe extern "C" fn bigint_rsh(a: Value, b: isize) -> Value {
     let mut i = tmp_bigint();
     check(mp_div_2d(
         a.as_bigint().mp_int_ptr(),
-        b,
+        b as i32,
         &mut i,
         core::ptr::null_mut(),
     ));
@@ -421,16 +491,16 @@ unsafe extern "C" fn bigint_rsh(a: Value, b: i32) -> Value {
 }
 
 #[no_mangle]
-unsafe extern "C" fn bigint_count_bits(a: Value) -> i32 {
-    mp_count_bits(a.as_bigint().mp_int_ptr())
+unsafe extern "C" fn bigint_count_bits(a: Value) -> usize {
+    mp_count_bits(a.as_bigint().mp_int_ptr()) as usize
 }
 
 #[no_mangle]
-pub unsafe extern "C" fn bigint_leb128_size(a: Value) -> u32 {
+pub unsafe extern "C" fn bigint_leb128_size(a: Value) -> usize {
     if mp_iszero(a.as_bigint().mp_int_ptr()) {
         1
     } else {
-        (bigint_count_bits(a) as u32 + 6) / 7 // divide by 7, round up
+        (bigint_count_bits(a) + 6) / 7 // divide by 7, round up
     }
 }
 
@@ -461,20 +531,29 @@ pub unsafe extern "C" fn bigint_leb128_encode(n: Value, buf: *mut u8) {
 }
 
 #[no_mangle]
-unsafe extern "C" fn bigint_2complement_bits(n: Value) -> u32 {
+#[classical_persistence]
+pub unsafe extern "C" fn bigint_leb128_stream_encode(stream: *mut Stream, n: Value) {
+    debug_assert!(!stream.is_forwarded());
+    let mut tmp: mp_int = core::mem::zeroed(); // or core::mem::uninitialized?
+    check(mp_init_copy(&mut tmp, n.as_bigint().mp_int_ptr()));
+    stream.write_leb128(&mut tmp, false)
+}
+
+#[no_mangle]
+unsafe extern "C" fn bigint_2complement_bits(n: Value) -> usize {
     let mp_int = n.as_bigint().mp_int_ptr();
     if mp_isneg(mp_int) {
         let mut tmp: mp_int = core::mem::zeroed(); // or core::mem::uninitialized?
         check(mp_init_copy(&mut tmp, mp_int));
         check(mp_incr(&mut tmp));
-        1 + mp_count_bits(&tmp) as u32
+        1 + mp_count_bits(&tmp) as usize
     } else {
-        1 + mp_count_bits(mp_int) as u32
+        1 + mp_count_bits(mp_int) as usize
     }
 }
 
 #[no_mangle]
-pub unsafe extern "C" fn bigint_sleb128_size(n: Value) -> u32 {
+pub unsafe extern "C" fn bigint_sleb128_size(n: Value) -> usize {
     (bigint_2complement_bits(n) + 6) / 7 // divide by 7, round up
 }
 
@@ -496,6 +575,26 @@ pub unsafe extern "C" fn bigint_sleb128_encode(n: Value, buf: *mut u8) {
     }
 }
 
+#[no_mangle]
+#[classical_persistence]
+pub unsafe extern "C" fn bigint_sleb128_stream_encode(stream: *mut Stream, n: Value) {
+    debug_assert!(!stream.is_forwarded());
+    let mut tmp: mp_int = core::mem::zeroed(); // or core::mem::uninitialized?
+    check(mp_init_copy(&mut tmp, n.as_bigint().mp_int_ptr()));
+
+    if mp_isneg(&tmp) {
+        // Turn negative numbers into the two's complement of the right size
+        let mut big: mp_int = core::mem::zeroed();
+        check(mp_init(&mut big));
+        let bytes = bigint_sleb128_size(n);
+        check(mp_2expt(&mut big, 7 * bytes as i32));
+        check(mp_add(&mut tmp, &big, &mut tmp));
+        stream.write_leb128(&mut tmp, false)
+    } else {
+        stream.write_leb128(&mut tmp, true)
+    }
+}
+
 #[no_mangle]
 pub unsafe extern "C" fn bigint_leb128_decode(buf: *mut Buf) -> Value {
     let mut i = tmp_bigint();
@@ -517,7 +616,21 @@ pub unsafe extern "C" fn bigint_leb128_decode(buf: *mut Buf) -> Value {
     persist_bigint(i)
 }
 
-/// Decode at most 5 bytes of LEB128 data to a compact bignum `Value`.
+#[cfg(feature = "ic")]
+const BITS_PER_CHUNK: usize = 7;
+
+#[cfg(feature = "ic")]
+const MAX_CHUNKS_PER_WORD: usize = (usize::BITS as usize + BITS_PER_CHUNK - 1) / BITS_PER_CHUNK;
+
+#[classical_persistence]
+#[cfg(feature = "ic")]
+const _: () = assert!(MAX_CHUNKS_PER_WORD == 5);
+
+#[enhanced_orthogonal_persistence]
+#[cfg(feature = "ic")]
+const _: () = assert!(MAX_CHUNKS_PER_WORD == 10);
+
+/// Decode bytes of LEB128 data to a compact bignum `Value`.
 /// The number of 7-bit chunks are located in the lower portion of `leb`
 /// as indicated by `bits`.
 ///
@@ -528,7 +641,7 @@ pub unsafe extern "C" fn bigint_leb128_decode_word64(
     mut bits: u64,
     buf: *mut Buf,
 ) -> Value {
-    let continuations = bits as u32 / 8;
+    let continuations = bits as usize / 8;
     buf.advance(continuations + 1);
 
     let mut mask: u64 = 0b111_1111; // sliding mask
@@ -536,20 +649,20 @@ pub unsafe extern "C" fn bigint_leb128_decode_word64(
     loop {
         acc |= leb & mask;
         if bits < 8 {
-            if continuations == 4 {
+            if continuations == MAX_CHUNKS_PER_WORD - 1 {
                 break;
             }
-            return int_from_i32(acc as i32);
+            return int_from_isize(acc as isize);
         }
         bits -= 8;
         mask <<= 7;
         leb >>= 1;
     }
 
-    let tentative = (acc as i32) << 1 >> 1; // top two bits must match
+    let tentative = (acc as isize) << 1 >> 1; // top two bits must match
     if tentative as u64 == acc {
         // roundtrip is valid
-        return int_from_i32(tentative);
+        return int_from_isize(tentative);
     }
 
     bigint_of_word64(acc)
@@ -585,7 +698,7 @@ pub unsafe extern "C" fn bigint_sleb128_decode(buf: *mut Buf) -> Value {
     persist_bigint(i)
 }
 
-/// Decode at most 5 bytes of SLEB128 data to a compact bignum `Value`.
+/// Decode bytes of SLEB128 data to a compact bignum `Value`.
 /// The number of 7-bit chunks are located in the lower portion of `sleb`
 /// as indicated by `bits`.
 ///
@@ -596,7 +709,7 @@ pub unsafe extern "C" fn bigint_sleb128_decode_word64(
     mut bits: u64,
     buf: *mut Buf,
 ) -> Value {
-    let continuations = bits as u32 / 8;
+    let continuations = bits as usize / 8;
     buf.advance(continuations + 1);
 
     let mut mask: u64 = 0b111_1111; // sliding mask
@@ -604,23 +717,38 @@ pub unsafe extern "C" fn bigint_sleb128_decode_word64(
     loop {
         acc |= sleb & mask;
         if bits < 8 {
-            if continuations == 4 {
+            if continuations == MAX_CHUNKS_PER_WORD - 1 {
                 break;
             }
-            let sext = 25 - 7 * continuations; // this many top bits will get a copy of the sign
-            return int_from_i32((acc as i32) << sext >> sext);
+            let sext = usize::BITS as usize - (BITS_PER_CHUNK * (continuations + 1)); // this many top bits will get a copy of the sign
+            return int_from_isize((acc as isize) << sext >> sext);
         }
         bits -= 8;
         mask <<= 7;
         sleb >>= 1;
     }
 
-    let signed = (acc as i64) << 29 >> 29; // sign extend
-    let tentative = (signed as i32) << 1 >> 1; // top two bits must match
+    sleb128_decode_word64_result(acc)
+}
+
+#[cfg(feature = "ic")]
+#[classical_persistence]
+unsafe fn sleb128_decode_word64_result(accumulator: u64) -> Value {
+    // Check if it fits into 32-bit or needs boxing to BigInt.
+    const UNUSED_BITS: u32 = u64::BITS - (BITS_PER_CHUNK * MAX_CHUNKS_PER_WORD) as u32;
+    const _: () = assert!(UNUSED_BITS == 29);
+    let signed = (accumulator as i64) << UNUSED_BITS >> UNUSED_BITS;
+    let tentative = (signed as isize) << 1 >> 1; // top two bits must match
     if tentative as i64 == signed {
         // roundtrip is valid
-        return int_from_i32(tentative);
+        return int_from_isize(tentative);
     }
-
     bigint_of_int64(signed)
 }
+
+#[cfg(feature = "ic")]
+#[enhanced_orthogonal_persistence]
+unsafe fn sleb128_decode_word64_result(accumulator: u64) -> Value {
+    // No unused bits in 64-bit representation. The sign bit is already set at bit 63.
+    int_from_isize(accumulator as isize)
+}
diff --git a/rts/motoko-rts/src/bitrel.rs b/rts/motoko-rts/src/bitrel.rs
index ec71fcf78f6..002445bdc6c 100644
--- a/rts/motoko-rts/src/bitrel.rs
+++ b/rts/motoko-rts/src/bitrel.rs
@@ -5,22 +5,22 @@ use crate::idl_trap_with;
 use crate::mem_utils::memzero;
 use crate::types::Words;
 
-const BITS: u32 = 2;
+const BITS: usize = 2;
 
 #[repr(packed)]
 pub struct BitRel {
     /// Pointer into the bit set
-    pub ptr: *mut u32,
+    pub ptr: *mut usize,
     /// Pointer to the end of the bit set
     /// must allow at least 2 * size1 * size2 bits
-    pub end: *mut u32,
-    pub size1: u32,
-    pub size2: u32,
+    pub end: *mut usize,
+    pub size1: usize,
+    pub size2: usize,
 }
 
 impl BitRel {
-    pub fn words(size1: u32, size2: u32) -> u32 {
-        return ((2 * size1 * size2 * BITS) + (usize::BITS - 1)) / usize::BITS;
+    pub fn words(size1: usize, size2: usize) -> usize {
+        return ((2 * size1 * size2 * BITS) + (usize::BITS as usize - 1)) / usize::BITS as usize;
     }
 
     pub unsafe fn init(&self) {
@@ -28,14 +28,20 @@ impl BitRel {
             idl_trap_with("BitRel invalid fields");
         };
 
-        let bytes = ((self.end as usize) - (self.ptr as usize)) as u32;
+        let bytes = (self.end as usize) - (self.ptr as usize);
         if bytes != BitRel::words(self.size1, self.size2) * WORD_SIZE {
             idl_trap_with("BitRel missized");
         };
         memzero(self.ptr as usize, Words(bytes / WORD_SIZE));
     }
 
-    unsafe fn locate_ptr_bit(&self, p: bool, i_j: u32, j_i: u32, bit: u32) -> (*mut u32, u32) {
+    unsafe fn locate_ptr_bit(
+        &self,
+        p: bool,
+        i_j: usize,
+        j_i: usize,
+        bit: usize,
+    ) -> (*mut usize, usize) {
         let size1 = self.size1;
         let size2 = self.size2;
         let (base, i, j) = if p { (0, i_j, j_i) } else { (size1, j_i, i_j) };
@@ -43,8 +49,8 @@ impl BitRel {
         debug_assert!(j < size2);
         debug_assert!(bit < BITS);
         let k = ((base + i) * size2 + j) * BITS + bit;
-        let word = (k / usize::BITS) as usize;
-        let bit = (k % usize::BITS) as u32;
+        let word = k / (usize::BITS as usize);
+        let bit = k % (usize::BITS as usize);
         let ptr = self.ptr.add(word);
         if ptr > self.end {
             idl_trap_with("BitRel indices out of bounds");
@@ -52,7 +58,7 @@ impl BitRel {
         return (ptr, bit);
     }
 
-    unsafe fn set(&self, p: bool, i_j: u32, j_i: u32, bit: u32, v: bool) {
+    unsafe fn set(&self, p: bool, i_j: usize, j_i: usize, bit: usize, v: bool) {
         let (ptr, bit) = self.locate_ptr_bit(p, i_j, j_i, bit);
         if v {
             *ptr = *ptr | (1 << bit);
@@ -61,32 +67,32 @@ impl BitRel {
         }
     }
 
-    unsafe fn get(&self, p: bool, i_j: u32, j_i: u32, bit: u32) -> bool {
+    unsafe fn get(&self, p: bool, i_j: usize, j_i: usize, bit: usize) -> bool {
         let (ptr, bit) = self.locate_ptr_bit(p, i_j, j_i, bit);
         let mask = 1 << bit;
         return *ptr & mask == mask;
     }
 
-    pub unsafe fn visited(&self, p: bool, i_j: u32, j_i: u32) -> bool {
+    pub unsafe fn visited(&self, p: bool, i_j: usize, j_i: usize) -> bool {
         self.get(p, i_j, j_i, 0)
     }
 
-    pub unsafe fn visit(&self, p: bool, i_j: u32, j_i: u32) {
+    pub unsafe fn visit(&self, p: bool, i_j: usize, j_i: usize) {
         self.set(p, i_j, j_i, 0, true)
     }
 
     #[allow(dead_code)]
     // NB: we store related bits in negated form to avoid setting on assumption
     // This code is a nop in production code.
-    pub unsafe fn assume(&self, p: bool, i_j: u32, j_i: u32) {
+    pub unsafe fn assume(&self, p: bool, i_j: usize, j_i: usize) {
         debug_assert!(!self.get(p, i_j, j_i, 1));
     }
 
-    pub unsafe fn related(&self, p: bool, i_j: u32, j_i: u32) -> bool {
+    pub unsafe fn related(&self, p: bool, i_j: usize, j_i: usize) -> bool {
         !self.get(p, i_j, j_i, 1)
     }
 
-    pub unsafe fn disprove(&self, p: bool, i_j: u32, j_i: u32) {
+    pub unsafe fn disprove(&self, p: bool, i_j: usize, j_i: usize) {
         self.set(p, i_j, j_i, 1, true)
     }
 }
diff --git a/rts/motoko-rts/src/blob_iter.rs b/rts/motoko-rts/src/blob_iter.rs
index 2e23e38f9ba..85f9ea94b7a 100644
--- a/rts/motoko-rts/src/blob_iter.rs
+++ b/rts/motoko-rts/src/blob_iter.rs
@@ -6,9 +6,9 @@ use crate::{
 
 use motoko_rts_macros::ic_mem_fn;
 
-const ITER_BLOB_IDX: u32 = 0;
+const ITER_BLOB_IDX: usize = 0;
 
-const ITER_POS_IDX: u32 = 1;
+const ITER_POS_IDX: usize = 1;
 
 /// Returns iterator for the given blob
 #[ic_mem_fn]
@@ -29,7 +29,7 @@ unsafe fn blob_iter(mem: &mut M, blob: Value) -> Value
 
 /// Returns whether the iterator is finished
 #[no_mangle]
-unsafe extern "C" fn blob_iter_done(iter: Value) -> u32 {
+unsafe extern "C" fn blob_iter_done(iter: Value) -> usize {
     let iter_array = iter.as_array();
 
     let blob = iter_array.get(ITER_BLOB_IDX);
@@ -40,7 +40,7 @@ unsafe extern "C" fn blob_iter_done(iter: Value) -> u32 {
 
 /// Reads next byte, advances the iterator
 #[ic_mem_fn]
-unsafe fn blob_iter_next<M: Memory>(mem: &mut M, iter: Value) -> u32 {
+unsafe fn blob_iter_next<M: Memory>(mem: &mut M, iter: Value) -> usize {
     let iter_array = iter.as_array();
 
     let blob = iter_array.get(ITER_BLOB_IDX);
diff --git a/rts/motoko-rts/src/buf.rs b/rts/motoko-rts/src/buf.rs
index c0867ac6d0d..40872b1682d 100644
--- a/rts/motoko-rts/src/buf.rs
+++ b/rts/motoko-rts/src/buf.rs
@@ -12,7 +12,7 @@ pub struct Buf {
 
 impl Buf {
     #[cfg(feature = "ic")]
-    pub(crate) unsafe fn advance(self: *mut Self, n: u32) {
+    pub(crate) unsafe fn advance(self: *mut Self, n: usize) {
         advance(self, n)
     }
 }
@@ -32,25 +32,30 @@ pub(crate) unsafe fn read_byte(buf: *mut Buf) -> u8 {
 #[cfg(feature = "ic")]
 /// Read a little-endian word
 pub(crate) unsafe fn read_word(buf: *mut Buf) -> u32 {
-    if (*buf).ptr.add(3) >= (*buf).end {
+    // IDL buffer is still 32-bit-based.
+    const WORD_SIZE: usize = core::mem::size_of::<u32>();
+
+    if (*buf).ptr.add(WORD_SIZE - 1) >= (*buf).end {
         idl_trap_with("word read out of buffer");
     }
 
     let p = (*buf).ptr;
-    let word = u32::from_le_bytes([*p, *p.add(1), *p.add(2), *p.add(3)]);
 
-    (*buf).ptr = (*buf).ptr.add(4);
+    let bytes: [u8; WORD_SIZE] = core::array::from_fn(|count| *p.add(count));
+    let word = u32::from_le_bytes(bytes);
+
+    (*buf).ptr = (*buf).ptr.add(WORD_SIZE);
 
     word
 }
 
 #[cfg(feature = "ic")]
-unsafe fn advance(buf: *mut Buf, n: u32) {
-    if (*buf).ptr.add(n as usize) > (*buf).end {
+unsafe fn advance(buf: *mut Buf, n: usize) {
+    if (*buf).ptr.add(n) > (*buf).end {
         idl_trap_with("advance out of buffer");
     }
 
-    (*buf).ptr = (*buf).ptr.add(n as usize);
+    (*buf).ptr = (*buf).ptr.add(n);
 }
 
 /// Can also be used for sleb
diff --git a/rts/motoko-rts/src/constants.rs b/rts/motoko-rts/src/constants.rs
index 327fe3f251c..3afd682d8c1 100644
--- a/rts/motoko-rts/src/constants.rs
+++ b/rts/motoko-rts/src/constants.rs
@@ -1,23 +1,27 @@
-use crate::types::{Bytes, Words};
+use motoko_rts_macros::classical_persistence;
 
-/// Wasm word size. RTS only works correctly on platforms with this word size.
-pub const WORD_SIZE: u32 = 4;
+use crate::types::Bytes;
 
-/// Maximum Motoko array size 2^29 (inclusive)
-/// NB: Must agree with Arr.max_array_size in compile.ml.
-pub const MAX_ARRAY_SIZE: u32 = 1 << 29;
+/// Wasm word size. RTS only works correctly on platforms with this word size.
+pub const WORD_SIZE: usize = core::mem::size_of::<usize>();
 
 /// Wasm page size (64 KiB) in bytes
-pub const WASM_PAGE_SIZE: Bytes<u32> = Bytes(64 * 1024);
-
-/// Wasm heap size (4 GiB) in words. Note that `to_bytes` on this value will overflow as 4 GiB in
-/// bytes is `u32::MAX + 1`.
-pub const WASM_HEAP_SIZE: Words<u32> = Words(1024 * 1024 * 1024);
-
-/// Wasm memory size (4 GiB) in bytes. Note: Represented as `u64` in order not to overflow.
-pub const WASM_MEMORY_BYTE_SIZE: Bytes<u64> = Bytes(4 * 1024 * 1024 * 1024);
+pub const WASM_PAGE_SIZE: Bytes<usize> = Bytes(64 * KB);
 
 /// Byte constants
 pub const KB: usize = 1024;
 pub const MB: usize = 1024 * KB;
 pub const GB: usize = 1024 * MB;
+
+#[classical_persistence]
+use crate::types::Words;
+
+/// Wasm 32-bit heap size (4 GiB) in words.
+/// Note that `to_bytes` on this value will overflow as 4 GiB in bytes is `u32::MAX + 1`.
+#[classical_persistence]
+pub const WASM32_HEAP_SIZE: Words<u32> = Words(1024 * 1024 * 1024);
+
+// The optimized array iterator requires array lengths to fit in signed compact numbers.
+// See `compile_enhanced.ml`, `GetPastArrayOffset`.
+// Two bits reserved: Two for Int tag (0b10L) and one for the sign bit.
+pub const MAX_ARRAY_LENGTH_FOR_ITERATOR: usize = 1 << (usize::BITS as usize - 3);
diff --git a/rts/motoko-rts/src/continuation_table.rs b/rts/motoko-rts/src/continuation_table.rs
index e53eebd9bb0..6f9ee486695 100644
--- a/rts/motoko-rts/src/continuation_table.rs
+++ b/rts/motoko-rts/src/continuation_table.rs
@@ -6,7 +6,7 @@
 //!
 //! It needs to support the following operations
 //!
-//!  1. Adding a continuation (any heap pointer) and getting an index (i32)
+//!  1. Adding a continuation (any heap pointer) and getting an index (usize)
 //!  2. Looking up a continuation by index, which also frees it
 //!  3. Peek into an existing continuation and hand back additional data
 //!  4. GC must be able to traverse and move continuations in the table
@@ -34,7 +34,7 @@ use crate::types::{Value, TAG_ARRAY_M};
 
 use motoko_rts_macros::ic_mem_fn;
 
-const INITIAL_SIZE: u32 = 256;
+const INITIAL_SIZE: usize = 256;
 
 // The static variables are re-initialized on canister upgrades and therefore not part of the
 // persistent metadata.
@@ -44,10 +44,10 @@ const INITIAL_SIZE: u32 = 256;
 static mut TABLE: Value = Value::from_scalar(0);
 
 // Number of currently live continuations
-static mut N_CONTINUATIONS: u32 = 0;
+static mut N_CONTINUATIONS: usize = 0;
 
 // Next free slot
-static mut FREE_SLOT: u32 = 0;
+static mut FREE_SLOT: usize = 0;
 
 unsafe fn create_continuation_table<M: Memory>(mem: &mut M) {
     TABLE = alloc_array(mem, TAG_ARRAY_M, INITIAL_SIZE);
@@ -91,7 +91,7 @@ pub unsafe fn table_initialized() -> bool {
 }
 
 #[ic_mem_fn]
-pub unsafe fn remember_continuation<M: Memory>(mem: &mut M, ptr: Value) -> u32 {
+pub unsafe fn remember_continuation<M: Memory>(mem: &mut M, ptr: Value) -> usize {
     if !table_initialized() {
         create_continuation_table(mem);
     }
@@ -120,10 +120,10 @@ pub unsafe fn remember_continuation(mem: &mut M, ptr: Value) -> u32 {
 
 // Position of the future in explicit self-send ContinuationTable entries
 // Invariant: keep this synchronised with compiler.ml (see future_array_index)
-const FUTURE_ARRAY_INDEX: u32 = 3;
+const FUTURE_ARRAY_INDEX: usize = 3;
 
 #[no_mangle]
-pub unsafe extern "C" fn peek_future_continuation(idx: u32) -> Value {
+pub unsafe extern "C" fn peek_future_continuation(idx: usize) -> Value {
     if !table_initialized() {
         rts_trap_with("peek_future_continuation: Continuation table not allocated");
     }
@@ -142,7 +142,7 @@ pub unsafe extern "C" fn peek_future_continuation(idx: u32) -> Value {
 }
 
 #[ic_mem_fn]
-pub unsafe fn recall_continuation<M: Memory>(mem: &mut M, idx: u32) -> Value {
+pub unsafe fn recall_continuation<M: Memory>(mem: &mut M, idx: usize) -> Value {
     if !table_initialized() {
         rts_trap_with("recall_continuation: Continuation table not allocated");
     }
@@ -169,7 +169,7 @@ pub unsafe fn recall_continuation(mem: &mut M, idx: u32) -> Value {
 }
 
 #[no_mangle]
-pub unsafe extern "C" fn continuation_count() -> u32 {
+pub unsafe extern "C" fn continuation_count() -> usize {
     N_CONTINUATIONS
 }
 
@@ -180,7 +180,7 @@ pub(crate) unsafe fn continuation_table_loc() -> *mut Value {
 
 #[cfg(feature = "ic")]
 #[no_mangle]
-unsafe extern "C" fn continuation_table_size() -> u32 {
+unsafe extern "C" fn continuation_table_size() -> usize {
     if !table_initialized() {
         0
     } else {
diff --git a/rts/motoko-rts/src/debug.rs b/rts/motoko-rts/src/debug.rs
index b60f136b051..9b0b2c4373c 100644
--- a/rts/motoko-rts/src/debug.rs
+++ b/rts/motoko-rts/src/debug.rs
@@ -1,5 +1,8 @@
 #![allow(dead_code)]
 
+use motoko_rts_macros::classical_persistence;
+use motoko_rts_macros::enhanced_orthogonal_persistence;
+
 use crate::print::*;
 use crate::types::*;
 
@@ -21,8 +24,8 @@ pub unsafe extern "C" fn print_value(value: Value) {
 }
 
 pub unsafe fn dump_heap(
-    heap_base: u32,
-    hp: u32,
+    heap_base: usize,
+    hp: usize,
     static_root_location: *mut Value,
     continuation_table_location: *mut Value,
 ) {
@@ -52,7 +55,7 @@ pub(crate) unsafe fn print_continuation_table(continuation_tbl_loc: *mut Value)
 
     for i in 0..len {
         let elem = arr.get(i);
-        if elem.is_non_null_ptr() {
+        if is_valid_pointer(elem) {
             let _ = write!(&mut write_buf, "{}: ", i);
             print_boxed_object(&mut write_buf, elem.get_ptr());
             print(&write_buf);
@@ -62,6 +65,16 @@ pub(crate) unsafe fn print_continuation_table(continuation_tbl_loc: *mut Value)
     println!(50, "End of continuation table");
 }
 
+#[classical_persistence]
+fn is_valid_pointer(value: Value) -> bool {
+    value.is_ptr()
+}
+
+#[enhanced_orthogonal_persistence]
+fn is_valid_pointer(value: Value) -> bool {
+    value.is_non_null_ptr()
+}
+
 pub(crate) unsafe fn print_static_roots(static_roots: Value) {
     let static_roots = static_roots.as_array();
     println!(100, "static roots at {:#x}", static_roots as usize);
@@ -80,7 +93,7 @@ pub(crate) unsafe fn print_static_roots(static_roots: Value) {
 
     let payload_addr = static_roots.payload_addr();
     for i in 0..len {
-        let field_addr = payload_addr.add(i as usize);
+        let field_addr = payload_addr.add(i);
         let _ = write!(&mut write_buf, "{}: {:#x} --> ", i, field_addr as usize);
         print_boxed_object(&mut write_buf, (*field_addr).get_ptr());
         print(&write_buf);
@@ -90,7 +103,7 @@ pub(crate) unsafe fn print_static_roots(static_roots: Value) {
     println!(50, "End of static roots");
 }
 
-unsafe fn print_heap(heap_start: u32, heap_end: u32) {
+unsafe fn print_heap(heap_start: usize, heap_end: usize) {
     println!(
         200,
         "Heap start={:#x}, heap end={:#x}, size={} bytes",
@@ -103,19 +116,19 @@ unsafe fn print_heap(heap_start: u32, heap_end: u32) {
     let mut write_buf = WriteBuf::new(&mut buf);
 
     let mut p = heap_start;
-    let mut i: Words<u32> = Words(0);
+    let mut i: Words<usize> = Words(0);
     while p < heap_end {
-        print_boxed_object(&mut write_buf, p as usize);
+        print_boxed_object(&mut write_buf, p);
         print(&write_buf);
         write_buf.reset();
 
-        let obj_size = block_size(p as usize);
-        p += obj_size.to_bytes().as_u32();
+        let obj_size = block_size(p);
+        p += obj_size.to_bytes().as_usize();
         i += obj_size;
     }
 }
 
-unsafe fn print_tagged_scalar(buf: &mut WriteBuf, p: u32) {
+unsafe fn print_tagged_scalar(buf: &mut WriteBuf, p: usize) {
     let _ = write!(buf, "", p);
 }
 
@@ -123,13 +136,13 @@ unsafe fn print_tagged_scalar(buf: &mut WriteBuf, p: u32) {
 pub(crate) unsafe fn print_boxed_object(buf: &mut WriteBuf, p: usize) {
     let _ = write!(buf, "{:#x}: ", p);
 
-    let obj = p as *mut Obj;
-    let forward = (*obj).forward;
+    let forward = (*(p as *mut Value)).forward();
     if forward.get_ptr() != p {
         let _ = write!(buf, "", forward.get_ptr());
         return;
     }
 
+    let obj = p as *mut Obj;
     let tag = obj.tag();
 
     if tag == 0 {
@@ -143,7 +156,7 @@ pub(crate) unsafe fn print_boxed_object(buf: &mut WriteBuf, p: usize) {
                 buf,
                 " {
             let blob = obj.as_blob();
-            let _ = write!(buf, "", blob.len().as_u32());
+            let _ = write!(buf, "", blob.len().as_usize());
         }
         TAG_FWD_PTR => {
             let ind = obj as *const FwdPtr;
             let _ = write!(buf, "", (*ind).fwd.get_raw());
         }
-        TAG_BITS32_U | TAG_BITS32_S | TAG_BITS32_F => {
-            let bits32 = obj as *const Bits32;
-            let _ = write!(buf, "", (*bits32).bits);
-        }
         TAG_BIGINT => {
             // Add more details here as needed
             let _ = write!(buf, "");
@@ -221,7 +230,7 @@ pub(crate) unsafe fn print_boxed_object(buf: &mut WriteBuf, p: usize) {
             let _ = write!(
                 buf,
                 "",
-                (*concat).n_bytes.as_u32(),
+                (*concat).n_bytes.as_usize(),
                 (*concat).text1.get_raw(),
                 (*concat).text2.get_raw()
             );
@@ -231,10 +240,20 @@ pub(crate) unsafe fn print_boxed_object(buf: &mut WriteBuf, p: usize) {
         }
         TAG_FREE_SPACE => {
             let free_space = obj as *const FreeSpace;
-            let _ = write!(buf, "", (*free_space).words.as_u32());
+            let _ = write!(buf, "", (*free_space).words.as_usize());
         }
         other => {
             let _ = write!(buf, "", other);
         }
     }
 }
+
+#[classical_persistence]
+unsafe fn get_obj_hash_pointer(object: *mut Object) -> usize {
+    (*object).hash_ptr
+}
+
+#[enhanced_orthogonal_persistence]
+unsafe fn get_obj_hash_pointer(object: *mut Object) -> usize {
+    (*object).hash_blob.get_raw()
+}
diff --git a/rts/motoko-rts/src/float.rs b/rts/motoko-rts/src/float.rs
index 644823e4a15..0fee1086138 100644
--- a/rts/motoko-rts/src/float.rs
+++ b/rts/motoko-rts/src/float.rs
@@ -1,38 +1,7 @@
-use crate::memory::Memory;
-use crate::text::text_of_ptr_size;
-use crate::types::{Bytes, Value};
+#[enhanced_orthogonal_persistence]
+pub mod enhanced;
 
-use motoko_rts_macros::ic_mem_fn;
+#[classical_persistence]
+pub mod classical;
 
-// The meaning of the `mode` parameter is documented in motoko-base, function Float.format()
-#[ic_mem_fn]
-unsafe fn float_fmt<M: Memory>(mem: &mut M, a: f64, prec: u32, mode: u32) -> Value {
-    // prec and mode are tagged small words (`Nat8`s), so we shift 24 bits. See
-    // `TaggedSmallWord.bits_of_type` in compile.ml.
-    let mode = mode >> 24;
-    let prec = core::cmp::min(prec >> 24, 100) as usize;
-
-    // 320 bytes needed for max precision (1.7e308)
-    let buf = [0u8; 320];
-
-    // NB. Using snprintf because I think only 0 and 3 are supposed by Rust's built-in formatter
-    let fmt = match mode {
-        0 => "%.*f\0",
-        1 => "%.*e\0",
-        2 => "%.*g\0",
-        3 => "%.*a\0",
-        _ => panic!("float_fmt: unrecognized mode"),
-    };
-
-    let n_written = libc::snprintf(
-        buf.as_ptr() as *mut _,
-        320,
-        fmt.as_ptr() as *const _,
-        prec,
-        a as libc::c_double,
-    );
-
-    assert!(n_written > 0);
-
-    text_of_ptr_size(mem, buf.as_ptr(), Bytes(n_written as u32))
-}
+use motoko_rts_macros::{classical_persistence, enhanced_orthogonal_persistence};
diff --git a/rts/motoko-rts/src/float/classical.rs b/rts/motoko-rts/src/float/classical.rs
new file mode 100644
index 00000000000..1ea8ce8ccb7
--- /dev/null
+++ b/rts/motoko-rts/src/float/classical.rs
@@ -0,0 +1,39 @@
+use crate::libc_declarations::c_double;
+use crate::memory::Memory;
+use crate::text::text_of_ptr_size;
+use crate::types::{Bytes, Value};
+
+use motoko_rts_macros::ic_mem_fn;
+
+// The meaning of the `mode` parameter is documented in motoko-base, function Float.format()
+#[ic_mem_fn]
+unsafe fn float_fmt<M: Memory>(mem: &mut M, a: f64, prec: u32, mode: u32) -> Value {
+    // prec and mode are tagged small words (`Nat8`s), so we shift 24 bits. See
+    // `TaggedSmallWord.bits_of_type` in compile.ml.
+    let mode = mode >> 24;
+    let prec = core::cmp::min(prec >> 24, 100) as usize;
+
+    // 320 bytes needed for max precision (1.7e308)
+    let buf = [0u8; 320];
+
+    // NB. Using snprintf because I think only 0 and 3 are supposed by Rust's built-in formatter
+    let fmt = match mode {
+        0 => "%.*f\0",
+        1 => "%.*e\0",
+        2 => "%.*g\0",
+        3 => "%.*a\0",
+        _ => panic!("float_fmt: unrecognized mode"),
+    };
+
+    let n_written = libc::snprintf(
+        buf.as_ptr() as *mut _,
+        320,
+        fmt.as_ptr() as *const _,
+        prec,
+        a as c_double,
+    );
+
+    assert!(n_written > 0);
+
+    text_of_ptr_size(mem, buf.as_ptr(), Bytes(n_written as usize))
+}
diff --git a/rts/motoko-rts/src/float/enhanced.rs b/rts/motoko-rts/src/float/enhanced.rs
new file mode 100644
index 00000000000..9055880fb83
--- /dev/null
+++ b/rts/motoko-rts/src/float/enhanced.rs
@@ -0,0 +1,39 @@
+use crate::memory::Memory;
+use crate::text::text_of_ptr_size;
+use crate::types::{Bytes, Value};
+
+use motoko_rts_macros::ic_mem_fn;
+
+// The meaning of the `mode` parameter is documented in motoko-base, function Float.format()
+#[ic_mem_fn]
+pub unsafe fn float_fmt<M: Memory>(mem: &mut M, a: f64, prec: usize, mode: usize) -> Value {
+    // prec and mode are tagged small words (`Nat8`s), so we shift 56 bits. See
+    // `TaggedSmallWord.bits_of_type` in compile.ml.
+    let mode = mode >> 56;
+    let prec = core::cmp::min(prec >> 56, 100) as usize;
+
+    // 320 bytes needed for max precision (1.7e308)
+    const BUFFER_LENGTH: usize = 320;
+    let buffer = match mode {
+        0 => format!(BUFFER_LENGTH, "{:.*}", prec, a),
+        1 => format!(BUFFER_LENGTH, "{:.*e}", prec, a),
+        2 => format!(BUFFER_LENGTH, "{:.*}", prec, a),
+        3 => panic!("float_fmt: unsupported mode"), // TODO: Support this mode (or deprecate in base library).
+        4 => format!(BUFFER_LENGTH, "{}", a),
+        _ => panic!("float_fmt: unrecognized mode"),
+    };
+
+    // TODO: Certain modes are not supported such as hexadecimal output (mode 3).
+
+    let length = written_length(&buffer);
+    text_of_ptr_size(mem, buffer.as_ptr(), Bytes(length))
+}
+
+fn written_length(buffer: &[u8]) -> usize {
+    for index in 0..buffer.len() {
+        if buffer[index] == 0 {
+            return index;
+        }
+    }
+    buffer.len()
+}
diff --git a/rts/motoko-rts/src/gc.rs b/rts/motoko-rts/src/gc.rs
index a2b3e2acd74..31663df866f 100644
--- a/rts/motoko-rts/src/gc.rs
+++ b/rts/motoko-rts/src/gc.rs
@@ -1 +1,28 @@
+#[non_incremental_gc]
+pub mod copying;
+#[non_incremental_gc]
+pub mod generational;
+#[incremental_gc]
 pub mod incremental;
+#[non_incremental_gc]
+pub mod mark_compact;
+
+pub mod remembered_set;
+
+use motoko_rts_macros::*;
+
+#[cfg(feature = "ic")]
+#[non_incremental_gc]
+unsafe fn should_do_gc(max_live: crate::types::Bytes<u64>) -> bool {
+    use crate::memory::ic::linear_memory::{get_hp_unskewed, LAST_HP};
+
+    // A factor of last heap size. We allow at most this much allocation before doing GC.
+    const HEAP_GROWTH_FACTOR: f64 = 1.5;
+
+    let heap_limit = core::cmp::min(
+        (f64::from(LAST_HP as u32) * HEAP_GROWTH_FACTOR) as u64,
+        (u64::from(LAST_HP as u32) + max_live.0) / 2,
+    );
+
+    u64::from(get_hp_unskewed() as u32) >= heap_limit
+}
diff --git a/rts/motoko-rts/src/gc/copying.rs b/rts/motoko-rts/src/gc/copying.rs
new file mode 100644
index 00000000000..378dfada791
--- /dev/null
+++ b/rts/motoko-rts/src/gc/copying.rs
@@ -0,0 +1,229 @@
+use crate::constants::WORD_SIZE;
+use crate::mem_utils::{memcpy_bytes, memcpy_words};
+use crate::memory::Memory;
+use crate::types::*;
+
+use motoko_rts_macros::ic_mem_fn;
+
+// Only designed for 32-bit.
+const _: () = assert!(core::mem::size_of::<usize>() == core::mem::size_of::<u32>());
+
+#[no_mangle]
+#[cfg(feature = "ic")]
+pub unsafe extern "C" fn initialize_copying_gc() {
+    crate::memory::ic::linear_memory::initialize();
+}
+
+#[ic_mem_fn(ic_only)]
+unsafe fn schedule_copying_gc<M: Memory>(mem: &mut M) {
+    // Half of the heap.
+    // NB. This expression is evaluated in compile time to a constant.
+    let max_live: Bytes<u64> =
+        Bytes((crate::constants::WASM32_HEAP_SIZE / 2).as_usize() as u64 * WORD_SIZE as u64);
+
+    if super::should_do_gc(max_live) {
+        copying_gc(mem);
+    }
+}
+
+#[ic_mem_fn(ic_only)]
+unsafe fn copying_gc<M: Memory>(mem: &mut M) {
+    use crate::memory::ic::{self, linear_memory};
+
+    copying_gc_internal(
+        mem,
+        ic::get_aligned_heap_base(),
+        // get_hp
+        || linear_memory::get_hp_unskewed(),
+        // set_hp
+        |hp| linear_memory::set_hp_unskewed(hp),
+        ic::get_static_roots(),
+        crate::continuation_table::continuation_table_loc(),
+        crate::region::region0_get_ptr_loc(),
+        // note_live_size
+        |live_size| linear_memory::MAX_LIVE = ::core::cmp::max(linear_memory::MAX_LIVE, live_size),
+        // note_reclaimed
+        |reclaimed| linear_memory::RECLAIMED += Bytes(reclaimed.as_usize() as u64),
+    );
+
+    linear_memory::LAST_HP = linear_memory::get_hp_unskewed();
+}
+
+pub unsafe fn copying_gc_internal<
+    M: Memory,
+    GetHp: Fn() -> usize,
+    SetHp: FnMut(usize),
+    NoteLiveSize: Fn(Bytes<usize>),
+    NoteReclaimed: Fn(Bytes<usize>),
+>(
+    mem: &mut M,
+    heap_base: usize,
+    get_hp: GetHp,
+    mut set_hp: SetHp,
+    static_roots: Value,
+    continuation_table_ptr_loc: *mut Value,
+    region0_ptr_loc: *mut Value,
+    note_live_size: NoteLiveSize,
+    note_reclaimed: NoteReclaimed,
+) {
+    let begin_from_space = heap_base as usize;
+    let end_from_space = get_hp();
+    let begin_to_space = end_from_space;
+
+    let static_roots = static_roots.as_array();
+
+    // Evacuate roots
+    evac_static_roots(mem, begin_from_space, begin_to_space, static_roots);
+
+    if (*continuation_table_ptr_loc).is_ptr() {
+        evac(
+            mem,
+            begin_from_space,
+            begin_to_space,
+            continuation_table_ptr_loc as usize,
+        );
+    }
+
+    if (*region0_ptr_loc).is_ptr() {
+        // Region0 is not always a pointer during GC?
+        evac(
+            mem,
+            begin_from_space,
+            begin_to_space,
+            region0_ptr_loc as usize,
+        );
+    }
+
+    // Scavenge to-space
+    let mut p = begin_to_space;
+    while p < get_hp() {
+        let size = block_size(p);
+        scav(mem, begin_from_space, begin_to_space, Value::from_ptr(p));
+        p += size.to_bytes().as_usize();
+    }
+
+    let end_to_space = get_hp();
+
+    // Note the stats
+    let new_live_size = end_to_space - begin_to_space;
+    note_live_size(Bytes(new_live_size));
+
+    let reclaimed = (end_from_space - begin_from_space) - (end_to_space - begin_to_space);
+    note_reclaimed(Bytes(reclaimed));
+
+    // Copy to-space to the beginning of from-space
+    memcpy_bytes(
+        begin_from_space,
+        begin_to_space,
+        Bytes(end_to_space - begin_to_space),
+    );
+
+    // Reset the heap pointer
+    let new_hp = begin_from_space + (end_to_space - begin_to_space);
+    set_hp(new_hp);
+}
+
+/// Evacuate (copy) an object in from-space to to-space.
+///
+/// Arguments:
+///
+/// - begin_from_space: Where the dynamic heap starts. Used for two things:
+///
+///   - An object is static if its address is below this value. These objects either don't point to
+///     dynamic heap, or are listed in static_roots array. Objects in static_roots are scavenged
+///     separately in `evac_static_roots` below. So we skip these objects here.
+///
+///   - After all objects are evacuated we move to-space to from-space, to be able to do that the
+///     pointers need to point to their (eventual) locations in from-space, which is calculated with
+///     `address_in_to_space - begin_to_space + begin_from_space`.
+///
+/// - begin_to_space: Where to-space starts. See above for how this is used.
+///
+/// - ptr_loc: Location of the object to evacuate, e.g. an object field address.
+///
+unsafe fn evac<M: Memory>(
+    mem: &mut M,
+    begin_from_space: usize,
+    begin_to_space: usize,
+    ptr_loc: usize,
+) {
+    // Field holds a skewed pointer to the object to evacuate
+    let ptr_loc = ptr_loc as *mut Value;
+
+    // Check object alignment to avoid undefined behavior. See also static_checks module.
+    debug_assert_eq!((*ptr_loc).get_ptr() % WORD_SIZE, 0);
+
+    // Update the field if the object is already evacuated
+    if (*ptr_loc).tag() == TAG_FWD_PTR {
+        let block = (*ptr_loc).get_ptr() as *const FwdPtr;
+        let fwd = (*block).fwd;
+        *ptr_loc = fwd;
+        return;
+    }
+
+    let obj = (*ptr_loc).get_ptr() as *mut Obj;
+
+    let obj_size = block_size(obj as usize);
+
+    // Allocate space in to-space for the object
+    let obj_addr = mem.alloc_words(obj_size).get_ptr();
+
+    // Copy object to to-space
+    memcpy_words(obj_addr, obj as usize, obj_size);
+
+    // Final location of the object after copying to-space back to from-space
+    let obj_loc = (obj_addr - begin_to_space) + begin_from_space;
+
+    // Set forwarding pointer
+    let fwd = obj as *mut FwdPtr;
+    (*fwd).tag = TAG_FWD_PTR;
+    (*fwd).fwd = Value::from_ptr(obj_loc);
+
+    // Update evacuated field
+    *ptr_loc = Value::from_ptr(obj_loc);
+
+    // Update forwarding pointer
+    let to_space_obj = obj_addr as *mut Obj;
+    debug_assert!(obj_size.as_usize() > size_of::<FwdPtr>().as_usize());
+    debug_assert!(to_space_obj.tag() >= TAG_OBJECT && to_space_obj.tag() <= TAG_NULL);
+}
+
+unsafe fn scav<M: Memory>(
+    mem: &mut M,
+    begin_from_space: usize,
+    begin_to_space: usize,
+    block: Value,
+) {
+    if !block.is_obj() {
+        // Skip `OneWordFiller` and `FreeSpace` that have no regular object header.
+        return;
+    }
+    let obj = block.get_ptr() as *mut Obj;
+
+    crate::visitor::visit_pointer_fields(
+        mem,
+        obj,
+        obj.tag(),
+        begin_from_space,
+        |mem, field_addr| {
+            evac(mem, begin_from_space, begin_to_space, field_addr as usize);
+        },
+        |_, _, arr| arr.len(),
+    );
+}
+
+// We have a special evacuation routine for "static roots" array: we don't evacuate elements of
+// "static roots", we just scavenge them.
+unsafe fn evac_static_roots<M: Memory>(
+    mem: &mut M,
+    begin_from_space: usize,
+    begin_to_space: usize,
+    roots: *mut Array,
+) {
+    // The array and the objects pointed by the array are all static so we don't evacuate them. We
+    // only evacuate fields of objects in the array.
+    for i in 0..roots.len() {
+        let obj = roots.get(i);
+        scav(mem, begin_from_space, begin_to_space, obj);
+    }
+}
diff --git a/rts/motoko-rts/src/gc/generational.rs b/rts/motoko-rts/src/gc/generational.rs
new file mode 100644
index 00000000000..abe23ae3fa7
--- /dev/null
+++ b/rts/motoko-rts/src/gc/generational.rs
@@ -0,0 +1,531 @@
+//! Generational compacting GC.
+//! Two generations: young and old.
+//! Frequent collection of young generation, sporadic full collection (old + young).
+//! Young generation collection requires an extra root set of old-to-young pointers.
+//! A write barrier catches all pointers leading from old to young generation.
+//! Compaction is based on the existing Motoko RTS threaded mark & compact GC.
+
+pub mod mark_stack;
+#[cfg(feature = "memory_check")]
+mod sanity_checks;
+pub mod write_barrier;
+
+use crate::gc::generational::mark_stack::{alloc_mark_stack, push_mark_stack};
+use crate::gc::mark_compact::bitmap::{
+    alloc_bitmap, free_bitmap, get_bit, iter_bits, set_bit, BITMAP_ITER_END,
+};
+
+use crate::constants::WORD_SIZE;
+use crate::mem_utils::memcpy_words;
+use crate::memory::Memory;
+use crate::types::*;
+use crate::visitor::{classical::pointer_to_dynamic_heap, visit_pointer_fields};
+
+use motoko_rts_macros::ic_mem_fn;
+
+use self::mark_stack::{free_mark_stack, pop_mark_stack};
+use self::write_barrier::REMEMBERED_SET;
+
+// Only designed for 32-bit.
+const _: () = assert!(core::mem::size_of::<usize>() == core::mem::size_of::<u32>());
+
+#[ic_mem_fn(ic_only)]
+unsafe fn initialize_generational_gc<M: Memory>(mem: &mut M) {
+    crate::memory::ic::linear_memory::initialize();
+    write_barrier::init_generational_write_barrier(mem);
+}
+
+#[ic_mem_fn(ic_only)]
+unsafe fn schedule_generational_gc<M: Memory>(mem: &mut M) {
+    let limits = get_limits();
+    if decide_strategy(&limits).is_some() {
+        generational_gc(mem);
+    }
+}
+
+#[ic_mem_fn(ic_only)]
+unsafe fn generational_gc<M: Memory>(mem: &mut M) {
+    use crate::memory::ic;
+
+    let old_limits = get_limits();
+    let roots = Roots {
+        static_roots: ic::get_static_roots(),
+        region0_ptr_loc: crate::region::region0_get_ptr_loc(),
+        continuation_table_ptr_loc: crate::continuation_table::continuation_table_loc(),
+    };
+    let heap = Heap {
+        mem,
+        limits: get_limits(),
+        roots,
+    };
+    let strategy = decide_strategy(&heap.limits);
+
+    let strategy = strategy.unwrap_or(Strategy::Young);
+    let mut gc = GenerationalGC::new(heap, strategy);
+
+    #[cfg(feature = "memory_check")]
+    sanity_checks::verify_snapshot(&gc.heap, false);
+
+    gc.run();
+
+    let new_limits = &gc.heap.limits;
+    set_limits(&gc.heap.limits);
+    update_statistics(&old_limits, new_limits);
+    update_strategy(strategy, new_limits);
+
+    #[cfg(feature = "memory_check")]
+    sanity_checks::check_memory(&gc.heap.limits, &gc.heap.roots);
+    #[cfg(feature = "memory_check")]
+    sanity_checks::take_snapshot(&mut gc.heap);
+
+    write_barrier::init_generational_write_barrier(gc.heap.mem);
+}
+
+#[cfg(feature = "ic")]
+unsafe fn get_limits() -> Limits {
+    use crate::memory::ic::{self, linear_memory};
+    assert!(linear_memory::LAST_HP >= ic::get_aligned_heap_base());
+    Limits {
+        base: ic::get_aligned_heap_base(),
+        last_free: linear_memory::LAST_HP,
+        free: (linear_memory::get_hp_unskewed()),
+    }
+}
+
+#[cfg(feature = "ic")]
+unsafe fn set_limits(limits: &Limits) {
+    use crate::memory::ic::linear_memory;
+    linear_memory::set_hp_unskewed(limits.free);
+    linear_memory::LAST_HP = limits.free;
+}
+
+#[cfg(feature = "ic")]
+unsafe fn update_statistics(old_limits: &Limits, new_limits: &Limits) {
+    use crate::memory::ic::linear_memory;
+    let live_size = Bytes(new_limits.free - new_limits.base);
+    linear_memory::MAX_LIVE = ::core::cmp::max(linear_memory::MAX_LIVE, live_size);
+    linear_memory::RECLAIMED += Bytes(old_limits.free as u64 - new_limits.free as u64);
+}
+
+#[derive(PartialEq, Clone, Copy, Debug)]
+pub enum Strategy {
+    Young,
+    Full,
+}
+
+#[cfg(feature = "ic")]
+static mut OLD_GENERATION_THRESHOLD: usize = 32 * 1024 * 1024;
+
+#[cfg(feature = "ic")]
+static mut PASSED_CRITICAL_LIMIT: bool = false;
+
+#[cfg(feature = "ic")]
+const CRITICAL_MEMORY_LIMIT: usize =
+    (4096 - 512) * 1024 * 1024 - crate::memory::GENERAL_MEMORY_RESERVE;
+
+#[cfg(feature = "ic")]
+unsafe fn decide_strategy(limits: &Limits) -> Option<Strategy> {
+    const YOUNG_GENERATION_THRESHOLD: usize = 8 * 1024 * 1024;
+
+    assert!(limits.base <= limits.last_free);
+    let old_generation_size = limits.last_free - limits.base;
+    assert!(limits.last_free <= limits.free);
+    let young_generation_size = limits.free - limits.last_free;
+
+    if limits.free >= CRITICAL_MEMORY_LIMIT && !PASSED_CRITICAL_LIMIT {
+        PASSED_CRITICAL_LIMIT = true;
+        Some(Strategy::Full)
+    } else if old_generation_size > OLD_GENERATION_THRESHOLD {
+        Some(Strategy::Full)
+    } else if young_generation_size > YOUNG_GENERATION_THRESHOLD {
+        Some(Strategy::Young)
+    } else {
+        None
+    }
+}
+
+#[cfg(feature = "ic")]
+unsafe fn update_strategy(strategy: Strategy, limits: &Limits) {
+    const GROWTH_RATE: f64 = 2.0;
+    if strategy == Strategy::Full {
+        OLD_GENERATION_THRESHOLD = ((limits.free - limits.base) as f64 * GROWTH_RATE) as usize;
+        if limits.free < CRITICAL_MEMORY_LIMIT {
+            PASSED_CRITICAL_LIMIT = false
+        }
+    }
+}
+
+pub struct Heap<'a, M: Memory> {
+    pub mem: &'a mut M,
+    pub limits: Limits,
+    pub roots: Roots,
+}
+
+pub struct Roots {
+    pub static_roots: Value,
+    pub continuation_table_ptr_loc: *mut Value,
+    pub region0_ptr_loc: *mut Value,
+    // For possible future additional roots, please extend the functionality in:
+    // * `mark_root_set`
+    // * `thread_initial_phase`
+}
+
+pub struct Limits {
+    pub base: usize,
+    pub last_free: usize, // this separates the old generation from the young generation
+    pub free: usize,
+}
+
+pub struct GenerationalGC<'a, M: Memory> {
+    pub heap: Heap<'a, M>,
+    marked_space: usize,
+    strategy: Strategy,
+}
+
+impl<'a, M: Memory> GenerationalGC<'a, M> {
+    pub fn new(heap: Heap<'a, M>, strategy: Strategy) -> GenerationalGC<'a, M> {
+        GenerationalGC {
+            heap,
+            marked_space: 0,
+            strategy,
+        }
+    }
+
+    pub unsafe fn run(&mut self) {
+        self.alloc_mark_structures();
+        self.mark_phase();
+        self.compact_phase();
+        self.free_mark_structures();
+    }
+
+    unsafe fn alloc_mark_structures(&mut self) {
+        const BITMAP_ALIGNMENT: usize = 8 * WORD_SIZE as usize;
+        let heap_prefix = match self.strategy {
+            Strategy::Young => self.heap.limits.last_free / BITMAP_ALIGNMENT * BITMAP_ALIGNMENT,
+            Strategy::Full => self.heap.limits.base,
+        };
+        let heap_size = Bytes(self.heap.limits.free - heap_prefix);
+        alloc_bitmap(self.heap.mem, heap_size, heap_prefix / WORD_SIZE);
+        alloc_mark_stack(self.heap.mem);
+    }
+
+    unsafe fn free_mark_structures(&mut self) {
+        free_mark_stack();
+        free_bitmap();
+    }
+
+    unsafe fn mark_phase(&mut self) {
+        self.marked_space = 0;
+        self.mark_root_set();
+        self.mark_all_reachable();
+    }
+
+    unsafe fn mark_root_set(&mut self) {
+        self.mark_static_roots();
+
+        let continuation_table = *self.heap.roots.continuation_table_ptr_loc;
+        if continuation_table.is_ptr() && continuation_table.get_ptr() >= self.generation_base() {
+            self.mark_object(continuation_table);
+        }
+
+        let region0 = *self.heap.roots.region0_ptr_loc;
+        if region0.is_ptr() && region0.get_ptr() >= self.generation_base() {
+            self.mark_object(region0);
+        }
+
+        if self.strategy == Strategy::Young {
+            self.mark_additional_young_root_set();
+        }
+    }
+
+    unsafe fn mark_static_roots(&mut self) {
+        let root_array = self.heap.roots.static_roots.as_array();
+        for i in 0..root_array.len() {
+            let object = root_array.get(i).as_obj();
+            assert_eq!(object.tag(), TAG_MUTBOX);
+            assert!((object as usize) < self.heap.limits.base);
+            self.mark_root_mutbox_fields(object as *mut MutBox);
+        }
+    }
+
+    unsafe fn mark_additional_young_root_set(&mut self) {
+        let mut iterator = REMEMBERED_SET.as_ref().unwrap().iterate();
+        while iterator.has_next() {
+            let location = iterator.current().get_raw() as *mut Value;
+            let value = *location;
+            // Check whether the location still refers to young object as this may have changed
+            // due to subsequent writes to that location after the write barrier recording.
+            if value.points_to_or_beyond(self.heap.limits.last_free) {
+                self.mark_object(value);
+            }
+            iterator.next();
+        }
+    }
+
+    unsafe fn mark_object(&mut self, object: Value) {
+        let pointer = object.get_ptr();
+        assert!(pointer >= self.generation_base());
+        assert_eq!(pointer % WORD_SIZE, 0);
+
+        let obj_idx = pointer / WORD_SIZE;
+        if get_bit(obj_idx) {
+            return;
+        }
+        set_bit(obj_idx);
+
+        push_mark_stack(self.heap.mem, pointer as usize);
+        self.marked_space += block_size(pointer as usize).to_bytes().as_usize();
+    }
+
+    unsafe fn mark_all_reachable(&mut self) {
+        while let Some(obj) = pop_mark_stack() {
+            self.mark_fields(obj as *mut Obj);
+        }
+    }
+
+    unsafe fn mark_fields(&mut self, object: *mut Obj) {
+        visit_pointer_fields(
+            self,
+            object,
+            object.tag(),
+            self.generation_base(),
+            |gc, field_address| {
+                let field_value = *field_address;
+                gc.mark_object(field_value);
+
+                // Should become a debug assertion in future.
+                gc.barrier_coverage_check(field_address);
+            },
+            |gc, slice_start, array| {
+                const SLICE_INCREMENT: usize = 255;
+                debug_assert!(SLICE_INCREMENT >= TAG_ARRAY_SLICE_MIN);
+                let base_tag = array.base_tag();
+                if array.len() - slice_start > SLICE_INCREMENT {
+                    let new_start = slice_start + SLICE_INCREMENT;
+                    // Remember to visit the array suffix later, store the next visit offset in the tag.
+                    array.set_slice_start(base_tag, new_start);
+                    push_mark_stack(gc.heap.mem, array as usize);
+                    new_start
+                } else {
+                    // No further visits of this array. Restore the tag.
+                    array.restore_tag(base_tag); // restore original tag
+                    array.len()
+                }
+            },
+        );
+    }
+
+    unsafe fn barrier_coverage_check(&self, field_address: *mut Value) {
+        if self.strategy == Strategy::Full
+            && (field_address as usize) >= self.heap.limits.base
+            && (field_address as usize) < self.heap.limits.last_free
+            && (*field_address).points_to_or_beyond(self.heap.limits.last_free)
+        {
+            assert!(REMEMBERED_SET
+                .as_ref()
+                .unwrap()
+                .contains(Value::from_raw(field_address as usize)));
+        }
+    }
+
+    unsafe fn mark_root_mutbox_fields(&mut self, mutbox: *mut MutBox) {
+        let field_address = &mut (*mutbox).field;
+        if pointer_to_dynamic_heap(field_address, self.generation_base()) {
+            self.mark_object(*field_address);
+        }
+    }
+
+    unsafe fn compact_phase(&mut self) {
+        if self.is_compaction_beneficial() {
+            self.thread_initial_phase();
+            self.move_phase();
+        }
+    }
+
+    fn is_compaction_beneficial(&self) -> bool {
+        // Returns false if the survival rate is f64::INF for an empty generation.
+        const SURVIVAL_THRESHOLD: f64 = 0.95;
+        self.survival_rate() < SURVIVAL_THRESHOLD
+    }
+
+    fn generation_base(&self) -> usize {
+        match self.strategy {
+            Strategy::Young => self.heap.limits.last_free,
+            Strategy::Full => self.heap.limits.base,
+        }
+    }
+
+    fn generation_size(&self) -> usize {
+        self.heap.limits.free - self.generation_base()
+    }
+
+    fn survival_rate(&self) -> f64 {
+        // Returns f64::INF if the generation size is zero, e.g. on forced GC.
+        self.marked_space as f64 / self.generation_size() as f64
+    }
+
+    unsafe fn thread_initial_phase(&mut self) {
+        self.thread_all_backward_pointers();
+
+        // For static roots, also forward pointers are threaded.
+        // Therefore, this must happen after the heap traversal for backwards pointer threading.
+        self.thread_static_roots();
+
+        let continuation_table = *self.heap.roots.continuation_table_ptr_loc;
+        if continuation_table.is_ptr() && continuation_table.get_ptr() >= self.generation_base() {
+            self.thread(self.heap.roots.continuation_table_ptr_loc);
+        }
+
+        let region0 = *self.heap.roots.region0_ptr_loc;
+        if region0.is_ptr() && region0.get_ptr() >= self.generation_base() {
+            self.thread(self.heap.roots.region0_ptr_loc);
+        }
+
+        // For the young generation GC run, the forward pointers from the old generation must be threaded too.
+        if self.strategy == Strategy::Young {
+            self.thread_old_generation_pointers();
+        }
+    }
+
+    unsafe fn thread_static_roots(&self) {
+        let root_array = self.heap.roots.static_roots.as_array();
+        for i in 0..root_array.len() {
+            let object = root_array.get(i).as_obj();
+            assert_eq!(object.tag(), TAG_MUTBOX);
+            assert!((object as usize) < self.heap.limits.base);
+            self.thread_root_mutbox_fields(object as *mut MutBox);
+        }
+    }
+
+    unsafe fn thread_root_mutbox_fields(&self, mutbox: *mut MutBox) {
+        let field_addr = &mut (*mutbox).field;
+        if pointer_to_dynamic_heap(field_addr, self.generation_base()) {
+            self.thread(field_addr);
+        }
+    }
+
+    unsafe fn thread_all_backward_pointers(&mut self) {
+        let mut bitmap_iter = iter_bits();
+        let mut bit = bitmap_iter.next();
+        while bit != BITMAP_ITER_END {
+            let object = (bit * WORD_SIZE) as *mut Obj;
+            self.thread_backward_pointer_fields(object);
+            bit = bitmap_iter.next();
+        }
+    }
+
+    unsafe fn thread_backward_pointer_fields(&mut self, object: *mut Obj) {
+        debug_assert!(object.tag() < TAG_ARRAY_SLICE_MIN);
+        visit_pointer_fields(
+            &mut (),
+            object,
+            object.tag(),
+            self.generation_base(),
+            |_, field_address| {
+                let field_value = *field_address;
+                // Thread if backwards or self pointer
+                if field_value.get_ptr() <= object as usize {
+                    (&self).thread(field_address);
+                }
+            },
+            |_, _, array| array.len(),
+        );
+    }
+
+    // Thread forward pointers in old generation leading to young generation
+    unsafe fn thread_old_generation_pointers(&mut self) {
+        let mut iterator = REMEMBERED_SET.as_ref().unwrap().iterate();
+        while iterator.has_next() {
+            let location = iterator.current().get_raw() as *mut Value;
+            assert!(
+                (location as usize) >= self.heap.limits.base
+                    && (location as usize) < self.heap.limits.last_free
+            );
+            let value = *location;
+            // value in the location may have changed since recording by the write barrier
+            if value.points_to_or_beyond(self.heap.limits.last_free) {
+                self.thread(location);
+            }
+            iterator.next();
+        }
+    }
+
+    unsafe fn move_phase(&mut self) {
+        REMEMBERED_SET = None; // no longer valid when the moving phase starts
+        let mut free = self.heap.limits.base;
+
+        let mut bitmap_iter = iter_bits();
+        if self.strategy == Strategy::Young {
+            free = self.heap.limits.last_free;
+        }
+        let mut bit = bitmap_iter.next();
+        while bit != BITMAP_ITER_END {
+            let old_pointer = (bit * WORD_SIZE) as *mut Obj;
+            let new_pointer = free;
+
+            // Unthread backwards pointers as well as forward pointers of static objects.
+            // In the case of a young collection, also unthread forward pointers of old objects.
+            self.unthread(old_pointer, new_pointer);
+
+            // Move the object
+            let object_size = block_size(old_pointer as usize);
+            if new_pointer as usize != old_pointer as usize {
+                memcpy_words(new_pointer as usize, old_pointer as usize, object_size);
+                debug_assert!(object_size.as_usize() > size_of::().as_usize());
+
+                // Update forwarding pointer
+                let new_obj = new_pointer as *mut Obj;
+                debug_assert!(new_obj.tag() >= TAG_OBJECT && new_obj.tag() <= TAG_NULL);
+            }
+
+            free += object_size.to_bytes().as_usize();
+
+            // Thread forward pointers of the object, even if not moved
+            self.thread_forward_pointers(new_pointer as *mut Obj);
+
+            bit = bitmap_iter.next();
+        }
+
+        self.heap.limits.free = free;
+    }
+
+    /// Thread forward pointers in object
+    unsafe fn thread_forward_pointers(&mut self, object: *mut Obj) {
+        visit_pointer_fields(
+            &mut (),
+            object,
+            object.tag(),
+            self.generation_base(),
+            |_, field_address| {
+                if (*field_address).get_ptr() > object as usize {
+                    (&self).thread(field_address)
+                }
+            },
+            |_, _, array| array.len(),
+        );
+    }
+
+    unsafe fn thread(&self, field: *mut Value) {
+        let pointed = (*field).get_ptr() as *mut Obj;
+        assert!(self.should_be_threaded(pointed));
+        let pointed_header = pointed.tag();
+        *field = Value::from_raw(pointed_header);
+        (*pointed).tag = field as usize;
+    }
+
+    unsafe fn unthread(&self, object: *mut Obj, new_location: usize) {
+        assert!(self.should_be_threaded(object));
+        let mut header = object.tag();
+        while header & 0b1 == 0 {
+            let tmp = (header as *const Obj).tag();
+            (*(header as *mut Value)) = Value::from_ptr(new_location);
+            header = tmp;
+        }
+        debug_assert!(header >= TAG_OBJECT && header <= TAG_NULL);
+        (*object).tag = header;
+    }
+
+    unsafe fn should_be_threaded(&self, object: *mut Obj) -> bool {
+        object as usize >= self.generation_base()
+    }
+}
diff --git a/rts/motoko-rts/src/gc/generational/mark_stack.rs b/rts/motoko-rts/src/gc/generational/mark_stack.rs
new file mode 100644
index 00000000000..7137e8afad9
--- /dev/null
+++ b/rts/motoko-rts/src/gc/generational/mark_stack.rs
@@ -0,0 +1,75 @@
+//! A stack for marking heap objects (for GC). Adopted from mark & compact GC.
+//! Simplified to only store object pointers without tags.
+
+use crate::gc::generational::TAG_BLOB_B;
+use crate::memory::{alloc_blob, Memory};
+use crate::types::{Blob, Words};
+
+use core::ptr::null_mut;
+
+/// Initial stack size
+pub const INIT_STACK_SIZE: Words = Words(64);
+
+/// Pointer to the `blob` object for the mark stack. Used to get the capacity of the stack.
+static mut STACK_BLOB_PTR: *mut Blob = null_mut();
+
+/// Bottom of the mark stack
+pub static mut STACK_BASE: *mut usize = null_mut();
+
+/// Top of the mark stack
+pub static mut STACK_TOP: *mut usize = null_mut();
+
+/// Next free slot in the mark stack
+pub static mut STACK_PTR: *mut usize = null_mut();
+
+/// Allocate the mark stack at the start of each GC run
+pub unsafe fn alloc_mark_stack<M: Memory>(mem: &mut M) {
+    assert!(STACK_BLOB_PTR.is_null());
+
+    // Allocating an actual object here to not break dump_heap
+    // No post allocation barrier as this RTS-internal blob will be collected by the GC.
+    STACK_BLOB_PTR = alloc_blob(mem, TAG_BLOB_B, INIT_STACK_SIZE.to_bytes()).get_ptr() as *mut Blob;
+    STACK_BASE = STACK_BLOB_PTR.payload_addr() as *mut usize;
+    STACK_PTR = STACK_BASE;
+    STACK_TOP = STACK_BASE.add(INIT_STACK_SIZE.as_usize());
+}
+
+/// Deallocate the mark stack after each GC run
+pub unsafe fn free_mark_stack() {
+    STACK_BLOB_PTR = null_mut();
+    STACK_BASE = null_mut();
+    STACK_PTR = null_mut();
+    STACK_TOP = null_mut();
+}
+
+/// Doubles the stack size
+pub unsafe fn grow_stack<M: Memory>(mem: &mut M) {
+    let stack_cap: Words = STACK_BLOB_PTR.len().to_words();
+    let p = mem.alloc_words(stack_cap).get_ptr() as *mut usize;
+
+    // Make sure nothing was allocated after the stack
+    assert_eq!(STACK_TOP, p);
+
+    let new_cap: Words = stack_cap * 2;
+    (*STACK_BLOB_PTR).len = new_cap.to_bytes();
+    STACK_TOP = STACK_BASE.add(new_cap.as_usize());
+}
+
+/// Push a new unskewed object pointer to be marked later
+pub unsafe fn push_mark_stack<M: Memory>(mem: &mut M, object: usize) {
+    if STACK_PTR == STACK_TOP {
+        grow_stack(mem);
+    }
+    *STACK_PTR = object;
+    STACK_PTR = STACK_PTR.add(1);
+}
+
+/// Pop an unskewed object pointer, if existent, to be visited next
+pub unsafe fn pop_mark_stack() -> Option<usize> {
+    if STACK_PTR == STACK_BASE {
+        return None;
+    }
+    STACK_PTR = STACK_PTR.sub(1);
+    let object = *STACK_PTR;
+    return Some(object);
+}
diff --git a/rts/motoko-rts/src/gc/generational/sanity_checks.rs b/rts/motoko-rts/src/gc/generational/sanity_checks.rs
new file mode 100644
index 00000000000..f0063006229
--- /dev/null
+++ b/rts/motoko-rts/src/gc/generational/sanity_checks.rs
@@ -0,0 +1,173 @@
+//! Extensive sanity checks for generational GC features.
+//! * Write barrier coverage by memory snapshot comparisons.
+//! * Memory sanity check, including a full heap scan.
+#![allow(dead_code)]
+
+use core::ptr::null_mut;
+
+use super::write_barrier::REMEMBERED_SET;
+use super::{Heap, Limits, Roots};
+use crate::mem_utils::memcpy_bytes;
+use crate::memory::{alloc_blob, Memory};
+use crate::types::*;
+use crate::visitor::{classical::pointer_to_dynamic_heap, visit_pointer_fields};
+
+static mut SNAPSHOT: *mut Blob = null_mut();
+
+/// Take a memory snapshot. To be initiated after GC run.
+pub unsafe fn take_snapshot<M: Memory>(heap: &mut Heap<M>) {
+    let length = Bytes(heap.limits.free);
+    // No post allocation barrier as this RTS-internal blob will be collected by the GC.
+    let blob = alloc_blob(heap.mem, TAG_BLOB_B, length).get_ptr() as *mut Blob;
+    memcpy_bytes(blob.payload_addr() as usize, 0, length);
+    SNAPSHOT = blob;
+}
+
+/// Verify write barrier coverage by comparing the memory against the previous snapshot.
+/// To be initiated before the next GC run. No effect if no snapshot has been taken.
+pub unsafe fn verify_snapshot<M: Memory>(heap: &Heap<M>, verify_roots: bool) {
+    if SNAPSHOT.is_null() {
+        return;
+    }
+    assert!(heap.limits.base <= heap.limits.free);
+    if verify_roots {
+        verify_static_roots(heap.roots.static_roots.as_array(), heap.limits.free);
+    }
+    verify_heap(&heap.limits);
+    SNAPSHOT = null_mut();
+}
+
+unsafe fn verify_static_roots(static_roots: *mut Array, last_free: usize) {
+    for index in 0..static_roots.len() {
+        let current = static_roots.get(index).as_obj();
+        assert_eq!(current.tag(), TAG_MUTBOX); // check tag
+        let mutbox = current as *mut MutBox;
+        let current_field = &mut (*mutbox).field;
+        if relevant_field(current_field, last_free) {
+            verify_field(current_field);
+        }
+    }
+}
+
+unsafe fn verify_heap(limits: &Limits) {
+    assert!(SNAPSHOT.len().as_usize() <= limits.free);
+    let mut pointer = limits.base;
+    while pointer < SNAPSHOT.len().as_usize() {
+        if Value::from_ptr(pointer).is_obj() {
+            let current = pointer as *mut Obj;
+            let previous = (SNAPSHOT.payload_addr() as usize + pointer) as *mut Obj;
+            assert!(current.tag() == previous.tag());
+            visit_pointer_fields(
+                &mut (),
+                current,
+                current.tag(),
+                0,
+                |_, current_field| {
+                    if relevant_field(current_field, limits.last_free) {
+                        verify_field(current_field);
+                    }
+                },
+                |_, slice_start, arr| {
+                    assert!(slice_start == 0);
+                    arr.len()
+                },
+            );
+        }
+        pointer += block_size(pointer).to_bytes().as_usize();
+    }
+}
+
+unsafe fn relevant_field(current_field: *mut Value, last_free: usize) -> bool {
+    if (current_field as usize) < last_free {
+        let value = *current_field;
+        value.is_ptr() && value.get_ptr() as usize >= last_free
+    } else {
+        false
+    }
+}
+
+unsafe fn verify_field(current_field: *mut Value) {
+    let memory_copy = SNAPSHOT.payload_addr() as usize;
+    let previous_field = (memory_copy + current_field as usize) as *mut Value;
+    if *previous_field != *current_field && !recorded(current_field as usize) {
+        panic!("Missing write barrier at {:#x}", current_field as usize);
+    }
+}
+
+unsafe fn recorded(value: usize) -> bool {
+    match &REMEMBERED_SET {
+        None => panic!("No remembered set"),
+        Some(remembered_set) => remembered_set.contains(Value::from_raw(value)),
+    }
+}
+
+pub struct MemoryChecker<'a> {
+    limits: &'a Limits,
+    roots: &'a Roots,
+}
+
+pub unsafe fn check_memory(limits: &Limits, roots: &Roots) {
+    let checker = MemoryChecker { limits, roots };
+    checker.check_memory();
+}
+
+impl<'a> MemoryChecker<'a> {
+    unsafe fn check_memory(&self) {
+        self.check_static_roots();
+        if (*self.roots.continuation_table_ptr_loc).is_ptr() {
+            self.check_object(*self.roots.continuation_table_ptr_loc);
+        }
+        self.check_heap();
+    }
+
+    unsafe fn check_static_roots(&self) {
+        let root_array = self.roots.static_roots.as_array();
+        for i in 0..root_array.len() {
+            let obj = root_array.get(i).as_obj();
+            assert_eq!(obj.tag(), TAG_MUTBOX);
+            assert!((obj as usize) < self.limits.base);
+            let mutbox = obj as *mut MutBox;
+            let field_addr = &mut (*mutbox).field;
+            if pointer_to_dynamic_heap(field_addr, self.limits.base as usize) {
+                let object = *field_addr;
+                self.check_object(object);
+            }
+        }
+    }
+
+    unsafe fn check_object(&self, object: Value) {
+        self.check_object_header(object);
+        visit_pointer_fields(
+            &mut (),
+            object.as_obj(),
+            object.tag(),
+            0,
+            |_, field_address| {
+                // Ignore null pointers used in text_iter.
+                if (*field_address).get_ptr() as *mut Obj != null_mut() {
+                    (&self).check_object_header(*field_address);
+                }
+            },
+            |_, _, arr| arr.len(),
+        );
+    }
+
+    unsafe fn check_object_header(&self, object: Value) {
+        assert!(object.is_ptr());
+        let pointer = object.get_ptr();
+        assert!(pointer < self.limits.free);
+        let tag = object.tag();
+        assert!(tag >= TAG_OBJECT && tag <= TAG_NULL);
+    }
+
+    unsafe fn check_heap(&self) {
+        let mut pointer = self.limits.base;
+        while pointer < self.limits.free {
+            let block = Value::from_ptr(pointer as usize);
+            if block.tag() != TAG_ONE_WORD_FILLER {
+                self.check_object(block);
+            }
+            pointer += block_size(pointer as usize).to_bytes().as_usize();
+        }
+    }
+}
diff --git a/rts/motoko-rts/src/gc/generational/write_barrier.rs b/rts/motoko-rts/src/gc/generational/write_barrier.rs
new file mode 100644
index 00000000000..1f03facd98b
--- /dev/null
+++ b/rts/motoko-rts/src/gc/generational/write_barrier.rs
@@ -0,0 +1,46 @@
+//! Write barrier, used for generational GC
+
+use crate::gc::remembered_set::RememberedSet;
+use crate::memory::Memory;
+use crate::types::Value;
+use motoko_rts_macros::ic_mem_fn;
+
+pub static mut REMEMBERED_SET: Option<RememberedSet> = None;
+pub static mut HEAP_BASE: usize = 0;
+pub static mut LAST_HP: usize = 0;
+
+#[cfg(feature = "ic")]
+/// (Re-)initialize the write barrier for generational GC.
+pub(crate) unsafe fn init_generational_write_barrier<M: Memory>(mem: &mut M) {
+    use crate::memory::ic::{self, linear_memory};
+    REMEMBERED_SET = Some(RememberedSet::new(mem));
+    HEAP_BASE = ic::get_aligned_heap_base();
+    LAST_HP = linear_memory::LAST_HP;
+}
+
+/// Write barrier to be called AFTER the pointer store, used for the generational GC.
+/// `location`: location of modified pointer (address of object field or array element).
+///
+/// As the barrier is called after the write, `*location` refers to the NEW value.
+/// No effect if the write barrier is deactivated.
+#[ic_mem_fn]
+pub unsafe fn post_write_barrier<M: Memory>(mem: &mut M, location: usize) {
+    // Must be an unskewed address.
+    debug_assert_eq!(location & 0b1, 0);
+    // Checks have been optimized according to the frequency of occurrence.
+    // Only record locations inside old generation. Static roots are anyway marked by GC.
+    if location < LAST_HP {
+        // Nested ifs are more efficient when counting instructions on IC (explicit return counts as an instruction).
+        let value = *(location as *mut Value);
+        if value.points_to_or_beyond(LAST_HP) {
+            #[allow(clippy::collapsible_if)]
+            if location >= HEAP_BASE {
+                // Trap pointers that lead from old generation (or static roots) to young generation.
+                REMEMBERED_SET
+                    .as_mut()
+                    .unwrap()
+                    .insert(mem, Value::from_raw(location));
+            }
+        }
+    }
+}
diff --git a/rts/motoko-rts/src/gc/incremental.rs b/rts/motoko-rts/src/gc/incremental.rs
index ce6d8956577..4c8bf761942 100644
--- a/rts/motoko-rts/src/gc/incremental.rs
+++ b/rts/motoko-rts/src/gc/incremental.rs
@@ -11,7 +11,9 @@
 //! retained across upgrades and therefore be stored part of the
 //! persistent metadata, cf. `persistence::PersistentMetadata`.
 
-use motoko_rts_macros::ic_mem_fn;
+#[cfg(feature = "ic")]
+use motoko_rts_macros::classical_persistence;
+use motoko_rts_macros::{enhanced_orthogonal_persistence, ic_mem_fn};
 
 use crate::{memory::Memory, stable_option::StableOption, types::*, visitor::visit_pointer_fields};
 
@@ -35,21 +37,39 @@ mod phases;
 pub mod roots;
 #[cfg(feature = "memory_check")]
 pub mod sanity_checks;
+#[cfg(feature = "ic")]
+mod scheduling;
 pub mod sort;
 pub mod time;
 
 #[ic_mem_fn(ic_only)]
 unsafe fn initialize_incremental_gc(mem: &mut M) {
+    initialize(mem);
+}
+
+#[cfg(feature = "ic")]
+#[enhanced_orthogonal_persistence]
+unsafe fn initialize<M: Memory>(_mem: &mut M) {
     use crate::persistence::initialize_memory;
+    initialize_memory::<M>();
+}
 
-    initialize_memory(mem);
+#[cfg(feature = "ic")]
+#[classical_persistence]
+unsafe fn initialize<M: Memory>(_mem: &mut M) {
+    use crate::memory::ic;
+
+    let state = STATE.get_mut();
+    let heap_base = ic::get_aligned_heap_base();
+    *state = IncrementalGC::<M>::initial_gc_state(heap_base);
+    partitioned_heap::allocate_initial_memory(Bytes(heap_base));
 }
 
 #[ic_mem_fn(ic_only)]
 unsafe fn schedule_incremental_gc(mem: &mut M) {
     let state = get_incremental_gc_state();
     let running = state.phase != Phase::Pause;
-    if running || should_start() {
+    if running || scheduling::should_start_gc() {
         incremental_gc(mem);
     }
 }
@@ -58,6 +78,7 @@ unsafe fn schedule_incremental_gc(mem: &mut M) {
 unsafe fn incremental_gc(mem: &mut M) {
     use self::roots::root_set;
     let state = get_incremental_gc_state();
+    assert!(state.phase != Phase::Stop);
     if state.phase == Phase::Pause {
         record_gc_start::();
     }
@@ -67,50 +88,20 @@ unsafe fn incremental_gc(mem: &mut M) {
     }
 }
 
-#[cfg(feature = "ic")]
-unsafe fn should_start() -> bool {
-    use self::partitioned_heap::PARTITION_SIZE;
-    use crate::constants::GB;
-    use crate::memory::ic;
-
-    const CRITICAL_HEAP_LIMIT: Bytes = Bytes((2 * GB + 256 * MB) as u32);
-    const CRITICAL_GROWTH_THRESHOLD: f64 = 0.01;
-    const MEDIUM_HEAP_LIMIT: Bytes = Bytes(1 * GB as u32);
-    const MEDIUM_GROWTH_THRESHOLD: f64 = 0.35;
-    const LOW_GROWTH_THRESHOLD: f64 = 0.65;
-
-    let heap_size = ic::get_heap_size();
-    let growth_threshold = if heap_size > CRITICAL_HEAP_LIMIT {
-        CRITICAL_GROWTH_THRESHOLD
-    } else if heap_size > MEDIUM_HEAP_LIMIT {
-        MEDIUM_GROWTH_THRESHOLD
-    } else {
-        LOW_GROWTH_THRESHOLD
-    };
-
-    let current_allocations = ic::get_total_allocations();
-    let state = get_incremental_gc_state();
-    debug_assert!(current_allocations >= state.statistics.last_allocations);
-    let absolute_growth = current_allocations - state.statistics.last_allocations;
-    let relative_growth = absolute_growth.0 as f64 / heap_size.as_usize() as f64;
-    relative_growth > growth_threshold && heap_size.as_usize() >= PARTITION_SIZE
-}
-
 #[cfg(feature = "ic")]
 unsafe fn record_gc_start() {
-    use crate::memory::ic;
+    use crate::memory::ic::partitioned_memory;
 
     let state = get_incremental_gc_state();
-    state.statistics.last_allocations = ic::get_total_allocations();
+    state.statistics.last_allocations = partitioned_memory::get_total_allocations();
 }
 
 #[cfg(feature = "ic")]
 unsafe fn record_gc_stop() {
-    use crate::memory::ic;
-    use crate::persistence::HEAP_START;
+    use crate::memory::ic::{self, partitioned_memory};
 
-    let heap_size = ic::get_heap_size();
-    let static_size = Bytes(HEAP_START as u32);
+    let heap_size = partitioned_memory::get_heap_size();
+    let static_size = Bytes(ic::get_aligned_heap_base());
     debug_assert!(heap_size >= static_size);
     let dynamic_size = heap_size - static_size;
     let state = get_incremental_gc_state();
@@ -122,7 +113,7 @@ struct Statistics {
     // Total number of allocation at the start of the last GC run.
     last_allocations: Bytes,
     // Maximum heap size the end of a GC run.
-    max_live: Bytes,
+    max_live: Bytes,
 }
 
 /// GC phases per run. Each of the following phases is performed in potentially multiple increments.
@@ -149,6 +140,7 @@ const INCREMENT_ALLOCATION_FACTOR: usize = 50; // Additional time factor per con
 #[derive(PartialEq)]
 #[repr(C)]
 enum Phase {
+    Stop,     // GC stopped during canister upgrade and explicit stabilization/destabilization.
     Pause,    // Inactive, waiting for the next GC run.
     Mark,     // Incremental marking.
     Evacuate, // Incremental evacuation compaction.
@@ -167,6 +159,21 @@ pub struct State {
     statistics: Statistics,
 }
 
+/// GC state retained over multiple GC increments.
+#[classical_persistence]
+#[cfg(feature = "ic")]
+static mut STATE: core::cell::RefCell = core::cell::RefCell::new(State {
+    phase: Phase::Pause,
+    partitioned_heap: self::partitioned_heap::UNINITIALIZED_HEAP,
+    allocation_count: 0,
+    mark_state: StableOption::None,
+    iterator_state: StableOption::None,
+    statistics: Statistics {
+        last_allocations: Bytes(0),
+        max_live: Bytes(0),
+    },
+});
+
 /// Temporary state during message execution, not part of the persistent metadata.
 static mut RUNNING_GC_INCREMENT: bool = false;
 
@@ -181,8 +188,8 @@ pub struct IncrementalGC<'a, M: Memory> {
 impl<'a, M: Memory + 'a> IncrementalGC<'a, M> {
     /// (Re-)Initialize the entire incremental garbage collector.
     /// Called on a runtime system start with incremental GC and also during RTS testing.
-    pub unsafe fn initial_gc_state(mem: &'a mut M, heap_base: usize) -> State {
-        let partitioned_heap = PartitionedHeap::new(mem, heap_base);
+    pub unsafe fn initial_gc_state(heap_base: usize) -> State {
+        let partitioned_heap = PartitionedHeap::new(heap_base);
         let statistics = Statistics {
             last_allocations: Bytes(0),
             max_live: Bytes(0),
@@ -276,7 +283,7 @@ impl<'a, M: Memory + 'a> IncrementalGC<'a, M> {
         debug_assert!(self.mark_completed());
         MarkIncrement::::complete_phase(self.state);
         self.state.phase = Phase::Evacuate;
-        EvacuationIncrement::::start_phase(self.state);
+        EvacuationIncrement::::start_phase(self.mem, self.state);
     }
 
     unsafe fn evacuation_completed(&self) -> bool {
@@ -328,7 +335,10 @@ unsafe fn pre_write_barrier(mem: &mut M, state: &mut State, overwritt
         if overwritten_value.points_to_or_beyond(base_address) {
             let mut time = BoundedTime::new(0);
             let mut increment = MarkIncrement::instance(mem, state, &mut time);
+
+            #[enhanced_orthogonal_persistence]
             debug_assert_ne!(overwritten_value, NULL_POINTER);
+
             increment.mark_object(overwritten_value);
         }
     }
@@ -395,6 +405,7 @@ unsafe fn update_new_allocation(state: &State, new_object: Value) {
             &mut (),
             object,
             object.tag(),
+            state.partitioned_heap.base_address(),
             |_, field| {
                 *field = (*field).forward_if_possible();
             },
@@ -416,15 +427,52 @@ pub unsafe fn get_partitioned_heap() -> &'static mut PartitionedHeap {
 }
 
 #[cfg(feature = "ic")]
+#[enhanced_orthogonal_persistence]
 pub unsafe fn get_incremental_gc_state() -> &'static mut State {
     crate::persistence::get_incremental_gc_state()
 }
 
 #[cfg(feature = "ic")]
-pub unsafe fn get_max_live_size() -> Bytes {
+#[classical_persistence]
+pub unsafe fn get_incremental_gc_state() -> &'static mut State {
+    STATE.get_mut()
+}
+
+#[cfg(feature = "ic")]
+pub unsafe fn get_max_live_size() -> Bytes {
     get_incremental_gc_state().statistics.max_live
 }
 
+/// Stop the GC. Called before stabilization and destabilization.
+pub unsafe fn stop_gc() {
+    let state = get_incremental_gc_state();
+    state.phase = Phase::Stop;
+}
+
+/// Resume the stopped GC. Called after completed destabilization.
+pub unsafe fn resume_gc() {
+    let state = get_incremental_gc_state();
+    assert!(state.phase == Phase::Stop);
+    state.phase = Phase::Pause;
+    // The allocation during destabilization should not count as concurrent
+    // mutator allocation. Therefore, reset the allocation count.
+    state.allocation_count = 0;
+}
+
+pub unsafe fn is_gc_stopped() -> bool {
+    get_incremental_gc_state().phase == Phase::Stop
+}
+
+/// Safety guard before Candid-stabilization with classical persistence.
+/// For graph copying, a different GC stop function is used, see
+/// `stabilization::ic::stop_gc_before_stabilization()`.
+#[classical_persistence]
+#[cfg(feature = "ic")]
+#[no_mangle]
+unsafe extern "C" fn stop_gc_on_upgrade() {
+    stop_gc();
+}
+
 /// For RTS unit testing only.
 #[cfg(not(feature = "ic"))]
 static mut TEST_GC_STATE: Option = None;
@@ -445,23 +493,21 @@ pub unsafe fn set_incremental_gc_state(state: Option) {
     TEST_GC_STATE = state;
 }
 
-#[cfg(feature = "ic")]
-use crate::constants::MB;
-
-/// Additional memory reserve in bytes for the GC.
-/// * To allow mark bitmap allocation, i.e. max. 128 MB in 4 GB address space.
-/// * 512 MB of free space for evacuations/compactions.
-#[cfg(feature = "ic")]
-const GC_MEMORY_RESERVE: usize = (128 + 512) * MB;
-
 #[cfg(feature = "ic")]
 pub unsafe fn memory_reserve() -> usize {
+    use crate::constants::MB;
     use crate::memory::GENERAL_MEMORY_RESERVE;
 
-    let additional_reserve = if RUNNING_GC_INCREMENT {
+    if RUNNING_GC_INCREMENT {
         0
     } else {
-        GC_MEMORY_RESERVE
-    };
-    GENERAL_MEMORY_RESERVE + additional_reserve
+        // Ensure there are free partitions for evacuation.
+        // 16 free partitions in 32-bit. 8 free partitions in 64-bit.
+        const EVACUATION_RESERVE: usize = 512 * MB;
+        // Reserve space for the mark bitmap, the evacuation space, and
+        // extra space for query and (stabilization) upgrade calls.
+        get_partitioned_heap().maximum_mark_bitmap_size()
+            + EVACUATION_RESERVE
+            + GENERAL_MEMORY_RESERVE
+    }
 }
diff --git a/rts/motoko-rts/src/gc/incremental/array_slicing.rs b/rts/motoko-rts/src/gc/incremental/array_slicing.rs
index a18fcb72217..d3aef1c28fc 100644
--- a/rts/motoko-rts/src/gc/incremental/array_slicing.rs
+++ b/rts/motoko-rts/src/gc/incremental/array_slicing.rs
@@ -5,8 +5,8 @@ use crate::types::*;
 /// This helps:
 /// * Ensure bounded increments when visiting fields on large arrays.
 /// * Prevent mark stack overflows on large arrays.
-pub unsafe fn slice_array(array: *mut Array) -> u32 {
-    const SLICE_INCREMENT: u32 = 128;
+pub unsafe fn slice_array(array: *mut Array) -> usize {
+    const SLICE_INCREMENT: usize = 128;
     debug_assert!(SLICE_INCREMENT >= TAG_ARRAY_SLICE_MIN);
     let (base_tag, slice_start) = array.get_slice_start();
     if array.len() - slice_start > SLICE_INCREMENT {
diff --git a/rts/motoko-rts/src/gc/incremental/barriers.rs b/rts/motoko-rts/src/gc/incremental/barriers.rs
index 091c255892a..beab2de3492 100644
--- a/rts/motoko-rts/src/gc/incremental/barriers.rs
+++ b/rts/motoko-rts/src/gc/incremental/barriers.rs
@@ -28,7 +28,7 @@ pub unsafe extern "C" fn running_gc() -> bool {
 /// * Resolve forwarding: Used during the GC update phase to adjust old pointers to their new forwarded addresses.
 #[ic_mem_fn]
 pub unsafe fn write_with_barrier(mem: &mut M, location: *mut Value, value: Value) {
-    debug_assert!(!is_skewed(location as u32));
+    debug_assert!(!is_skewed(location as usize));
     debug_assert_ne!(location, core::ptr::null_mut());
 
     let state = get_incremental_gc_state();
diff --git a/rts/motoko-rts/src/gc/incremental/mark_bitmap.rs b/rts/motoko-rts/src/gc/incremental/mark_bitmap.rs
index d6f5cde880f..a0a4a5b71ce 100644
--- a/rts/motoko-rts/src/gc/incremental/mark_bitmap.rs
+++ b/rts/motoko-rts/src/gc/incremental/mark_bitmap.rs
@@ -5,8 +5,8 @@
 //! of a GC run.
 //!
 //! A bitmap is represented in raw memory (no object header, no blob) of the fixed size
-//! `PARTITION_SIZE / WORD_SIZE / u8::bits`, which is `PARTITION_SIZE /32` bytes. For simplicity, the bitmaps
-//! is not shortened for partitions that also accommodate static space besides dynamic space.
+//! `PARTITION_SIZE / WORD_SIZE / u8::BITS` bytes. For simplicity, the bitmap is not shortened for partitions
+//! that also accommodate static space besides dynamic space.
 //!
 //! To access a mark bit of an object, the corresponding bitmap and address offset inside the object's partition
 //! needs to be first determined. The corresponding bit is then accessed at the byte with index
@@ -20,13 +20,14 @@
 //! The mark bitmap serves for fast traversal of marked objects in a partition with few marked objects
 //! (and many garbage objects).
 
-use core::{mem::size_of, ptr::null_mut};
+use core::ptr::null_mut;
 
 use crate::{constants::WORD_SIZE, mem_utils::memzero, types::Bytes};
 
 use super::partitioned_heap::PARTITION_SIZE;
 
-const BITMAP_FRACTION: usize = (WORD_SIZE * u8::BITS) as usize;
+const BITS_PER_BYTE: usize = u8::BITS as usize;
+const BITMAP_FRACTION: usize = WORD_SIZE * BITS_PER_BYTE;
 
 pub const BITMAP_SIZE: usize = PARTITION_SIZE / BITMAP_FRACTION;
 
@@ -48,13 +49,9 @@ impl MarkBitmap {
     }
 
     /// Assign and initialize the bitmap memory at the defined address.
-    /// The `bitmap_address` must be 64-bit-aligned for fast iteration.
     pub unsafe fn assign(&mut self, bitmap_address: *mut u8) {
-        debug_assert_eq!(bitmap_address as usize % size_of::(), 0);
-        memzero(
-            bitmap_address as usize,
-            Bytes(BITMAP_SIZE as u32).to_words(),
-        );
+        debug_assert_eq!(bitmap_address as usize % WORD_SIZE, 0);
+        memzero(bitmap_address as usize, Bytes(BITMAP_SIZE).to_words());
         debug_assert_eq!(self.pointer, null_mut());
         self.pointer = bitmap_address;
     }
@@ -65,17 +62,17 @@ impl MarkBitmap {
     }
 
     fn word_index(&self, offset_in_partition: usize) -> usize {
-        debug_assert_eq!(offset_in_partition % WORD_SIZE as usize, 0);
+        debug_assert_eq!(offset_in_partition % WORD_SIZE, 0);
         debug_assert!(offset_in_partition < PARTITION_SIZE);
-        offset_in_partition / (WORD_SIZE as usize)
+        offset_in_partition / WORD_SIZE
     }
 
     /// Check whether the object at defined address offset in the partition is marked.
     pub unsafe fn is_marked(&self, offset_in_partition: usize) -> bool {
         debug_assert_ne!(self.pointer, null_mut());
         let word_index = self.word_index(offset_in_partition);
-        let byte_index = word_index / u8::BITS as usize;
-        let bit_index = word_index % u8::BITS as usize;
+        let byte_index = word_index / BITS_PER_BYTE;
+        let bit_index = word_index % BITS_PER_BYTE;
         let byte = self.pointer.add(byte_index);
         (*byte >> bit_index) & 0b1 != 0
     }
@@ -84,8 +81,8 @@ impl MarkBitmap {
     pub unsafe fn mark(&mut self, offset_in_partition: usize) {
         debug_assert_ne!(self.pointer, null_mut());
         let word_index = self.word_index(offset_in_partition);
-        let byte_index = word_index / u8::BITS as usize;
-        let bit_index = word_index % u8::BITS as usize;
+        let byte_index = word_index / BITS_PER_BYTE;
+        let bit_index = word_index % BITS_PER_BYTE;
         let byte = self.pointer.add(byte_index);
         *byte |= 0b1 << bit_index;
     }
@@ -101,7 +98,7 @@ impl MarkBitmap {
 /// to better support the incremental evacuation and update GC increments.
 #[repr(C)]
 pub struct BitmapIterator {
-    /// Start address of the mark bitmap. Must be 64-bit-aligned.
+    /// Start address of the mark bitmap.
     bitmap_pointer: *mut u8,
     /// Index of next bit to continue iteration in the bitmap.
     /// Invariant during (initialized and unfinished):
@@ -110,10 +107,8 @@ pub struct BitmapIterator {
     /// reading the corresponding bit in the bitmap.
     /// Sentinel: `BITMAP_ITERATION_END` if the iteration is finished.
     next_bit_index: usize,
-    /// Current 64-bit word in the bitmap that we are iterating.
-    /// Optimization: Reading in 64-bit chunks to check as many bits as
-    /// possible with a single `word != 0`.
-    current_word: u64,
+    /// Current word in the bitmap that we are iterating.
+    current_word: usize,
     /// Number of leading bits that are initially zeroed in `current_word`.
     leading_zeros: usize,
 }
@@ -123,16 +118,15 @@ pub struct BitmapIterator {
 pub const BITMAP_ITERATION_END: usize = usize::MAX;
 
 /// Last possible valid value of `next_bit_index`.
-const BIT_INDEX_END: usize = BITMAP_SIZE * u8::BITS as usize;
+const BIT_INDEX_END: usize = BITMAP_SIZE * BITS_PER_BYTE;
 
 const _: () = assert!(BIT_INDEX_END < BITMAP_ITERATION_END);
 
 impl BitmapIterator {
-    /// The `bitmap_pointer` must be 64-bit-aligned.
     fn new(bitmap_pointer: *mut u8) -> BitmapIterator {
         debug_assert_ne!(bitmap_pointer, null_mut());
-        debug_assert_eq!(PARTITION_SIZE % size_of::(), 0);
-        debug_assert_eq!(bitmap_pointer as usize % size_of::(), 0);
+        debug_assert_eq!(PARTITION_SIZE % WORD_SIZE, 0);
+        debug_assert_eq!(bitmap_pointer as usize % WORD_SIZE, 0);
         let mut iterator = BitmapIterator {
             bitmap_pointer,
             next_bit_index: 0,
@@ -151,19 +145,19 @@ impl BitmapIterator {
         if self.next_bit_index == BITMAP_ITERATION_END {
             return BITMAP_ITERATION_END;
         } else {
-            (self.next_bit_index - 1) * WORD_SIZE as usize
+            (self.next_bit_index - 1) * WORD_SIZE
         }
     }
 
     /// Advance the iterator to the next marked offset.
     pub fn next(&mut self) {
         debug_assert!(self.next_bit_index <= BIT_INDEX_END);
-        // Outer loop iterates the 64-bit words.
+        // Outer loop iterates the words.
         while self.next_bit_index < BIT_INDEX_END {
             // Examine the least significant bit(s) in the current word.
             if self.current_word != 0 {
                 let shift = self.current_word.trailing_zeros() as usize;
-                // Two shifts to avoid an overflow in the case of `shift == 63`.
+                // Two shifts to avoid an overflow in the case of `shift == usize::BITS - 1`.
                 self.current_word >>= shift;
                 self.current_word >>= 1;
                 self.next_bit_index += shift + 1;
@@ -172,14 +166,13 @@ impl BitmapIterator {
                 return;
             }
 
-            // Move on to next word, always within a 64-bit boundary.
+            // Move on to next word.
             self.next_bit_index += self.leading_zeros;
             if self.next_bit_index < BIT_INDEX_END {
-                debug_assert_eq!(self.next_bit_index % u8::BITS as usize, 0);
-                let word64_index = self.next_bit_index / u8::BITS as usize;
-                // The bitmap pointer is guaranteed to be always 64-bit aligned, see `BitmapIterator::new()`.
+                debug_assert_eq!(self.next_bit_index % BITS_PER_BYTE, 0);
+                let word_index = self.next_bit_index / BITS_PER_BYTE;
                 self.current_word =
-                    unsafe { *(self.bitmap_pointer.add(word64_index) as *const u64) };
+                    unsafe { *(self.bitmap_pointer.add(word_index) as *const usize) };
                 self.leading_zeros = self.current_word.leading_zeros() as usize;
             }
         }
diff --git a/rts/motoko-rts/src/gc/incremental/partitioned_heap.rs b/rts/motoko-rts/src/gc/incremental/partitioned_heap.rs
index 3e9765da092..174116bb3a7 100644
--- a/rts/motoko-rts/src/gc/incremental/partitioned_heap.rs
+++ b/rts/motoko-rts/src/gc/incremental/partitioned_heap.rs
@@ -1,12 +1,13 @@
 //! Partitioned heap used in incremental GC for compacting evacuation.
 //! The heap is divided in equal sized partitions of a large size `PARTITION_SIZE`.
-//! The first partition(s) may contain a static heap space with static objects that are never moved.
-//! Beyond the static objects of a partition, the dynamic heap space starts with `dynamic_size`.
+//! Some partitions contain a static (pinned) heap space that is never moved.
+//! This is used for static objects at the beginning of the heap or for a partition table.
+//! Beyond the static space of a partition, the dynamic heap space starts with `dynamic_size`.
 //!
-//! Heap layout, with N = `MAX_PARTITIONS`:
-//! ┌───────────────┬───────────────┬───────────────┬───────────────┐
-//! │  partition 0  │  partition 1  |      ...      | partition N-1 |
-//! └───────────────┴───────────────┴───────────────┴───────────────┘
+//! Heap layout, with a dynamic number of partitions:
+//! ┌───────────────┬───────────────┬───────────────┐
+//! │  partition 0  │  partition 1  |      ...      |
+//! └───────────────┴───────────────┴───────────────┘
 //!
 //! Partition layout:
 //! ┌───────────────┬───────────────┬───────────────┐
@@ -18,6 +19,15 @@
 //! Whenever a partition is full or has insufficient space to accommodate a new allocation,
 //! a new empty partition is selected for allocation.
 //!
+//! A linked list of partition tables allows dynamic growth of the heap memory even in 64-bit address
+//! space. The first partition table is placed in the record of the partitioned heap. Subsequent
+//! partition tables reside in the static space of a partition.
+//!
+//! ┌─────────────────────┐ extension ┌─────────────────────┐ extension
+//! │   Partition table   │---------->│   Partition table   │----------> ...
+//! └─────────────────────┘           └─────────────────────┘
+//! (in `PartitionedHeap`)              (in static space)
+//!
 //! On garbage collection, partitions are selected for evacuation by prioritizing high-garbage
 //! partitions. The live objects of the partitions selected for evacuation are moved out to
 //! other remaining partitions (through allocation). Thereby, objects from different evacuated
@@ -38,12 +48,18 @@
 //! of the last partition of a huge object is not used for further small object allocations,
 //! which implies limited internal fragmentation.
 
-use core::{array::from_fn, ops::Range, ptr::null_mut};
+use core::{
+    array::from_fn,
+    iter::Iterator,
+    ops::Range,
+    ptr::{null, null_mut},
+};
+
+use motoko_rts_macros::{classical_persistence, enhanced_orthogonal_persistence};
 
 use crate::{
-    constants::{MB, WASM_MEMORY_BYTE_SIZE},
     gc::incremental::mark_bitmap::BITMAP_ITERATION_END,
-    memory::Memory,
+    memory::{alloc_blob, Memory},
     rts_trap_with,
     stable_option::StableOption,
     types::*,
@@ -57,16 +73,25 @@ use super::{
 
 /// Size of each partition.
 /// Select the size of the power of two with the smallest WASM memory size in the benchmark.
-/// -> Small partitions below 32 MB are inefficient in terms of both memory and runtime costs
+/// -> Small partitions are inefficient in terms of both memory and runtime costs
 ///    due to the increased frequency of large object handling.
-/// -> Large partitions above 32 MB are a waste for small programs, since the WASM memory is
+/// -> Large partitions are a waste for small programs, since the WASM memory is
 ///    allocated in that granularity and GC is then triggered later.
-pub const PARTITION_SIZE: usize = 32 * MB;
+#[enhanced_orthogonal_persistence]
+pub const PARTITION_SIZE: usize = 64 * 1024 * 1024;
+
+#[classical_persistence]
+pub const PARTITION_SIZE: usize = 32 * 1024 * 1024;
 
-/// Total number of partitions in the memory.
+/// Number of entries per partition table.
+/// Tables are linearly linked, allowing the usage of the entire address space.
+/// Maximum contiguous space is `(PARTITIONS_PER_TABLE - 1) * PARTITION_SIZE`.
+const PARTITIONS_PER_TABLE: usize = 128; // 8 GB of space for 64 MB partitions and 4 GB for 32 MB partitions.
+
+/// Maximum number of partitions in the memory.
 /// For simplicity, the last partition is left unused, to avoid a numeric overflow when
 /// computing the end address of the last partition.
-const MAX_PARTITIONS: usize = (WASM_MEMORY_BYTE_SIZE.0 / PARTITION_SIZE as u64) as usize - 1;
+const MAX_PARTITIONS: usize = usize::MAX / PARTITION_SIZE;
 
 /// Partitions are only evacuated if the space occupation of alive objects in the partition
 /// is greater than this threshold.
@@ -92,6 +117,21 @@ pub struct Partition {
     update: bool,        // Specifies whether the pointers in the partition have to be updated.
 }
 
+/// Optimization: Avoiding `Option` or `Lazy`.
+#[classical_persistence]
+const UNINITIALIZED_PARTITION: Partition = Partition {
+    index: usize::MAX,
+    free: false,
+    large_content: false,
+    marked_size: 0,
+    static_size: 0,
+    dynamic_size: 0,
+    bitmap: super::mark_bitmap::DEFAULT_MARK_BITMAP,
+    temporary: false,
+    evacuate: false,
+    update: false,
+};
+
 impl Partition {
     pub fn get_index(&self) -> usize {
         self.index
@@ -174,23 +214,23 @@ impl Partition {
         use crate::constants::WORD_SIZE;
         debug_assert!(self.dynamic_space_end() <= self.end_address());
         let remaining_space = self.end_address() - self.dynamic_space_end();
-        debug_assert_eq!(remaining_space % WORD_SIZE as usize, 0);
+        debug_assert_eq!(remaining_space % WORD_SIZE, 0);
         debug_assert!(remaining_space <= PARTITION_SIZE);
         if remaining_space == 0 {
             return;
         }
         let block = self.dynamic_space_end() as *mut Tag;
-        if remaining_space == WORD_SIZE as usize {
+        if remaining_space == WORD_SIZE {
             *block = TAG_ONE_WORD_FILLER;
         } else {
             *block = TAG_FREE_SPACE;
             let header_size = size_of::().to_bytes().as_usize();
             debug_assert!(remaining_space >= header_size);
             let free_space = block as *mut FreeSpace;
-            (*free_space).words = Bytes((remaining_space - header_size) as u32).to_words();
+            (*free_space).words = Bytes(remaining_space - header_size).to_words();
             // Clear the remainder of the free space.
             let clear_start = free_space as usize + header_size;
-            let clear_length = Bytes((remaining_space - header_size) as u32);
+            let clear_length = Bytes(remaining_space - header_size);
             crate::mem_utils::memzero(clear_start, clear_length.to_words());
             debug_assert_eq!(free_space.size().to_bytes().as_usize(), remaining_space);
         }
@@ -232,6 +272,7 @@ impl Partition {
 #[repr(C)]
 pub struct PartitionedHeapIterator {
     partition_index: usize,
+    number_of_partitions: usize,
     bitmap_iterator: StableOption,
     visit_large_object: bool,
 }
@@ -240,6 +281,7 @@ impl PartitionedHeapIterator {
     pub fn new(heap: &PartitionedHeap) -> PartitionedHeapIterator {
         let mut iterator = PartitionedHeapIterator {
             partition_index: 0,
+            number_of_partitions: heap.number_of_partitions,
             bitmap_iterator: StableOption::None,
             visit_large_object: false,
         };
@@ -250,7 +292,7 @@ impl PartitionedHeapIterator {
 
     fn skip_empty_partitions(&mut self, heap: &PartitionedHeap) {
         loop {
-            if self.partition_index == MAX_PARTITIONS {
+            if self.partition_index == self.number_of_partitions {
                 return;
             }
             let partition = heap.get_partition(self.partition_index);
@@ -262,16 +304,16 @@ impl PartitionedHeapIterator {
     }
 
     pub fn has_partition(&self) -> bool {
-        self.partition_index < MAX_PARTITIONS
+        self.partition_index < self.number_of_partitions
     }
 
     pub fn current_partition<'a>(&self, heap: &'a PartitionedHeap) -> &'a Partition {
-        debug_assert!(self.partition_index < MAX_PARTITIONS);
+        debug_assert!(self.partition_index < self.number_of_partitions);
         heap.get_partition(self.partition_index)
     }
 
     pub unsafe fn next_partition(&mut self, heap: &PartitionedHeap) {
-        debug_assert!(self.partition_index < MAX_PARTITIONS);
+        debug_assert!(self.partition_index < self.number_of_partitions);
         let partition = heap.get_partition(self.partition_index);
         let number_of_partitions = if partition.has_large_content() {
             let large_object = partition.dynamic_space_start() as *mut Obj;
@@ -285,8 +327,8 @@ impl PartitionedHeapIterator {
     }
 
     fn start_object_iteration(&mut self, heap: &PartitionedHeap) {
-        debug_assert!(self.partition_index <= MAX_PARTITIONS);
-        if self.partition_index == MAX_PARTITIONS {
+        debug_assert!(self.partition_index <= self.number_of_partitions);
+        if self.partition_index == self.number_of_partitions {
             self.bitmap_iterator = StableOption::None;
             self.visit_large_object = false;
         } else {
@@ -337,11 +379,111 @@ impl PartitionedHeapIterator {
     }
 }
 
+/// The first table is part of the static `PartitionedHeap` record.
+/// Extension tables reside in the static space of a partition.
+/// Therefore, partition tables are never moved in memory.
+#[repr(C)]
+struct PartitionTable {
+    partitions: [Partition; PARTITIONS_PER_TABLE],
+    extension: *mut PartitionTable,
+}
+
+/// Optimization: Avoiding `Option` or `Lazy`.
+#[classical_persistence]
+const UNINITIALIZED_PARTITION_TABLE: PartitionTable = PartitionTable {
+    partitions: [UNINITIALIZED_PARTITION; PARTITIONS_PER_TABLE],
+    extension: null_mut(),
+};
+
+const PARTITION_TABLE_SIZE: usize = core::mem::size_of::();
+
+impl PartitionTable {
+    /// The start index is the first partition index in the table.
+    /// The static space starts at the first partition entry of the partition table.
+    pub fn new(start_index: usize, static_space: usize) -> PartitionTable {
+        let completely_static_partitions = static_space / PARTITION_SIZE;
+        let partitions = from_fn(|offset| Partition {
+            index: start_index + offset,
+            free: true,
+            large_content: false,
+            marked_size: 0,
+            static_size: if offset < completely_static_partitions {
+                PARTITION_SIZE
+            } else if offset == completely_static_partitions {
+                static_space % PARTITION_SIZE
+            } else {
+                0
+            },
+            dynamic_size: 0,
+            bitmap: MarkBitmap::new(),
+            temporary: false,
+            evacuate: false,
+            update: false,
+        });
+        PartitionTable {
+            partitions,
+            extension: null_mut(),
+        }
+    }
+
+    /// The table address must point to static partition space.
+    /// The start index is the first partition index in the table.
+    /// The static space starts at the first partition entry of the partition table.
+    pub unsafe fn allocate(
+        table_address: usize,
+        start_index: usize,
+        static_space: usize,
+    ) -> *mut PartitionTable {
+        let table = table_address as *mut PartitionTable;
+        *table = Self::new(start_index, static_space);
+        table
+    }
+}
+
+struct PartitionIterator {
+    table: *mut PartitionTable,
+    index: usize,
+}
+
+impl PartitionIterator {
+    pub fn new(heap: &mut PartitionedHeap) -> PartitionIterator {
+        let first_table = &mut heap.partition_table as *mut PartitionTable;
+        PartitionIterator {
+            table: first_table,
+            index: 0,
+        }
+    }
+}
+
+impl Iterator for PartitionIterator {
+    type Item = &'static mut Partition;
+
+    fn next(&mut self) -> Option {
+        debug_assert_ne!(self.table, null_mut());
+        if self.index > MAX_PARTITIONS {
+            return None;
+        }
+        let table_offset = self.index % PARTITIONS_PER_TABLE;
+        unsafe {
+            if self.index > 0 && table_offset == 0 {
+                self.table = (*self.table).extension;
+                if self.table == null_mut() {
+                    return None;
+                }
+            }
+            let partition = &mut (*self.table).partitions[table_offset];
+            self.index += 1;
+            Some(partition)
+        }
+    }
+}
+
 /// Partitioned heap used by the incremental GC.
 /// Use a long-term representation by relying on C layout.
 #[repr(C)]
 pub struct PartitionedHeap {
-    partitions: [Partition; MAX_PARTITIONS],
+    partition_table: PartitionTable,
+    number_of_partitions: usize,
     heap_base: usize,
     allocation_index: usize, // Index of the partition currently used for allocations.
     free_partitions: usize,  // Number of free partitions.
@@ -353,32 +495,34 @@ pub struct PartitionedHeap {
     evacuated_size: usize, // Size of all evacuated objects during a GC run. Serves for accurate total allocation statistics.
 }
 
+/// Optimization: Avoiding `Option` or `LazyCell`.
+#[classical_persistence]
+pub const UNINITIALIZED_HEAP: PartitionedHeap = PartitionedHeap {
+    partition_table: UNINITIALIZED_PARTITION_TABLE,
+    number_of_partitions: 0,
+    heap_base: 0,
+    allocation_index: 0,
+    free_partitions: 0,
+    evacuating: false,
+    reclaimed: 0,
+    bitmap_allocation_pointer: 0,
+    gc_running: false,
+    precomputed_heap_size: 0,
+    evacuated_size: 0,
+};
+
 impl PartitionedHeap {
-    pub unsafe fn new<M: Memory>(mem: &mut M, heap_base: usize) -> PartitionedHeap {
+    pub fn new(heap_base: usize) -> PartitionedHeap {
+        let number_of_partitions = PARTITIONS_PER_TABLE;
         let allocation_index = heap_base / PARTITION_SIZE;
-        mem.grow_memory(((allocation_index + 1) * PARTITION_SIZE) as u64);
-        let partitions = from_fn(|index| Partition {
-            index,
-            free: index > allocation_index,
-            large_content: false,
-            marked_size: 0,
-            static_size: if index < allocation_index {
-                PARTITION_SIZE
-            } else if index == allocation_index {
-                heap_base % PARTITION_SIZE
-            } else {
-                0
-            },
-            dynamic_size: 0,
-            bitmap: MarkBitmap::new(),
-            temporary: false,
-            evacuate: false,
-            update: false,
-        });
-        debug_assert!(allocation_index <= MAX_PARTITIONS);
-        let free_partitions = MAX_PARTITIONS - allocation_index - 1;
+        let mut partition_table = PartitionTable::new(0, heap_base);
+        for index in 0..allocation_index + 1 {
+            partition_table.partitions[index].free = false;
+        }
+        let free_partitions = number_of_partitions - allocation_index - 1;
         PartitionedHeap {
-            partitions,
+            partition_table,
+            number_of_partitions,
             heap_base,
             allocation_index,
             free_partitions,
@@ -391,47 +535,132 @@ impl PartitionedHeap {
         }
     }
 
+    pub fn maximum_mark_bitmap_size(&self) -> usize {
+        debug_assert!(self.free_partitions <= self.number_of_partitions);
+        let used_partitions = self.number_of_partitions - self.free_partitions;
+        used_partitions * BITMAP_SIZE
+    }
+
+    fn partitions(&mut self) -> PartitionIterator {
+        PartitionIterator::new(self)
+    }
+
     pub fn is_initialized(&self) -> bool {
-        self.partitions[0].index == 0
+        self.number_of_partitions > 0
     }
 
     pub fn base_address(&self) -> usize {
         self.heap_base
     }
 
+    unsafe fn get_extension_table(&self, partition_index: usize) -> *mut PartitionTable {
+        debug_assert!(partition_index >= PARTITIONS_PER_TABLE);
+        let mut index = partition_index - PARTITIONS_PER_TABLE;
+        let mut table = self.partition_table.extension;
+        while index >= PARTITIONS_PER_TABLE {
+            index -= PARTITIONS_PER_TABLE;
+            debug_assert_ne!((*table).extension, null_mut());
+            table = (*table).extension;
+        }
+        table
+    }
+
+    unsafe fn get_partition_table(&self, partition_index: usize) -> *const PartitionTable {
+        if partition_index < PARTITIONS_PER_TABLE {
+            let first_table = &self.partition_table as *const PartitionTable;
+            debug_assert_ne!(first_table, null());
+            first_table
+        } else {
+            self.get_extension_table(partition_index)
+        }
+    }
+
+    unsafe fn mutable_partition_table(&mut self, partition_index: usize) -> *mut PartitionTable {
+        self.get_partition_table(partition_index) as *mut PartitionTable
+    }
+
     pub fn get_partition(&self, index: usize) -> &Partition {
-        &self.partitions[index]
+        unsafe {
+            let table = self.get_partition_table(index);
+            let offset = index % PARTITIONS_PER_TABLE;
+            &(*table).partitions[offset]
+        }
     }
 
     fn mutable_partition(&mut self, index: usize) -> &mut Partition {
-        &mut self.partitions[index]
+        unsafe {
+            let table = self.mutable_partition_table(index);
+            let offset = index % PARTITIONS_PER_TABLE;
+            &mut (*table).partitions[offset]
+        }
+    }
+
+    unsafe fn add_partition_table<M: Memory>(&mut self, mem: &mut M) {
+        debug_assert!(self.number_of_partitions > 0);
+        let last_table = self.mutable_partition_table(self.number_of_partitions - 1);
+        debug_assert_ne!(last_table, null_mut());
+        debug_assert_eq!((*last_table).extension, null_mut());
+        let table_address = self.number_of_partitions * PARTITION_SIZE;
+        mem.grow_memory(table_address + PARTITION_SIZE);
+        (*last_table).extension = PartitionTable::allocate(
+            table_address,
+            self.number_of_partitions,
+            PARTITION_TABLE_SIZE,
+        );
+        // The partition table is small enough such that all partitions contain free space.
+        debug_assert!(PARTITION_TABLE_SIZE < PARTITION_SIZE);
+        self.free_partitions += PARTITIONS_PER_TABLE;
+        self.number_of_partitions += PARTITIONS_PER_TABLE;
+        self.precomputed_heap_size += PARTITION_TABLE_SIZE;
     }
 
-    unsafe fn allocate_temporary_partition(&mut self) -> &mut Partition {
-        for partition in &mut self.partitions {
+    unsafe fn allocate_partition<M: Memory, F: Fn(&mut PartitionedHeap) -> Option<usize>>(
+        &mut self,
+        mem: &mut M,
+        find_partition: &F,
+    ) -> usize {
+        let mut result = find_partition(self);
+        if result.is_none() {
+            self.add_partition_table(mem);
+            result = find_partition(self);
+        }
+        if result.is_none() {
+            rts_trap_with("Cannot grow memory");
+        }
+        result.unwrap()
+    }
+
+    fn scan_for_temporary_partition(&mut self) -> Option<usize> {
+        for partition in self.partitions() {
             if partition.is_completely_free() {
-                debug_assert_eq!(partition.dynamic_size, 0);
-                partition.free = false;
-                partition.temporary = true;
-                debug_assert!(self.free_partitions > 0);
-                self.free_partitions -= 1;
-                return partition;
+                return Some(partition.index);
             }
         }
-        rts_trap_with("Cannot grow memory");
+        None
+    }
+
+    unsafe fn allocate_temporary_partition<M: Memory>(&mut self, mem: &mut M) -> &mut Partition {
+        let index = self.allocate_partition(mem, &Self::scan_for_temporary_partition);
+        debug_assert!(self.free_partitions > 0);
+        self.free_partitions -= 1;
+        let partition = self.mutable_partition(index);
+        debug_assert_eq!(partition.dynamic_size, 0);
+        partition.free = false;
+        partition.temporary = true;
+        partition
     }
 
     /// The returned bitmap address is guaranteed to be 64-bit-aligned.
     unsafe fn allocate_bitmap<M: Memory>(&mut self, mem: &mut M) -> *mut u8 {
         if self.bitmap_allocation_pointer % PARTITION_SIZE == 0 {
-            let partition = self.allocate_temporary_partition();
-            mem.grow_memory(partition.end_address() as u64);
+            let partition = self.allocate_temporary_partition(mem);
+            mem.grow_memory(partition.end_address());
             self.bitmap_allocation_pointer = partition.start_address();
         }
         let bitmap_address = self.bitmap_allocation_pointer as *mut u8;
         self.bitmap_allocation_pointer += BITMAP_SIZE;
         debug_assert_eq!(
-            bitmap_address as usize % size_of::().to_bytes().as_usize(),
+            bitmap_address as usize % size_of::().to_bytes().as_usize(),
             0
         );
         bitmap_address
@@ -473,23 +702,21 @@ impl PartitionedHeap {
         debug_assert_eq!(self.bitmap_allocation_pointer, 0);
         debug_assert!(!self.gc_running);
         self.gc_running = true;
-        for partition_index in 0..MAX_PARTITIONS {
+        for partition_index in 0..self.number_of_partitions {
             let partition = self.get_partition(partition_index);
             if partition.has_dynamic_space() && !partition.has_large_content() {
                 let bitmap_address = self.allocate_bitmap(mem);
                 self.mutable_partition(partition_index)
                     .bitmap
                     .assign(bitmap_address);
-                time.advance(Bytes(BITMAP_SIZE as u32).to_words().as_usize());
+                time.advance(Bytes(BITMAP_SIZE).to_words().as_usize());
             }
         }
     }
 
-    pub fn plan_evacuations(&mut self) {
-        let ranked_partitions = self.rank_partitions_by_garbage();
+    pub unsafe fn plan_evacuations<M: Memory>(&mut self, mem: &mut M) {
         debug_assert_eq!(
-            self.partitions
-                .iter()
+            self.partitions()
                 .filter(|partition| partition.is_free())
                 .count(),
             self.free_partitions
@@ -500,7 +727,9 @@ impl PartitionedHeap {
         let reserved_partitions =
             (self.free_partitions + EVACUATION_FRACTION - 1) / EVACUATION_FRACTION;
         let mut evacuation_space = reserved_partitions * PARTITION_SIZE;
-        for index in ranked_partitions {
+        let ranked_partitions = self.rank_partitions_by_garbage(mem);
+        for rank in 0..self.number_of_partitions {
+            let index = *ranked_partitions.add(rank);
             if index != self.allocation_index && self.get_partition(index).is_evacuation_candidate()
             {
                 let partition = self.mutable_partition(index);
@@ -516,26 +745,40 @@ impl PartitionedHeap {
         }
     }
 
-    fn rank_partitions_by_garbage(&self) -> [usize; MAX_PARTITIONS] {
-        let mut ranked_partitions: [usize; MAX_PARTITIONS] = from_fn(|index| index);
-        sort(&mut ranked_partitions, &|left, right| {
-            self.get_partition(left)
-                .garbage_amount()
-                .cmp(&self.get_partition(right).garbage_amount())
-                .reverse()
-        });
+    unsafe fn temporary_array<M: Memory>(mem: &mut M, length: usize) -> *mut usize {
+        // No post allocation barrier as this RTS-internal blob can be collected by the GC.
+        let blob = alloc_blob(mem, TAG_BLOB_B, Words(length).to_bytes());
+        let payload = blob.as_blob_mut().payload_addr();
+        payload as *mut usize
+    }
+
+    unsafe fn rank_partitions_by_garbage<M: Memory>(&self, mem: &mut M) -> *mut usize {
+        let ranked_partitions = Self::temporary_array(mem, self.number_of_partitions);
+        for index in 0..self.number_of_partitions {
+            *ranked_partitions.add(index) = index;
+        }
+        sort(
+            ranked_partitions,
+            self.number_of_partitions,
+            &|left, right| {
+                self.get_partition(left)
+                    .garbage_amount()
+                    .cmp(&self.get_partition(right).garbage_amount())
+                    .reverse()
+            },
+        );
         ranked_partitions
     }
 
     pub fn plan_updates(&mut self) {
-        for partition in &mut self.partitions {
+        for partition in self.partitions() {
             debug_assert!(!partition.update);
             partition.update = !partition.is_free() && !partition.evacuate;
         }
     }
 
     pub unsafe fn complete_collection(&mut self) {
-        for partition in &mut self.partitions {
+        for partition in self.partitions() {
             let marked_size = partition.marked_size;
             partition.update = false;
             partition.marked_size = 0;
@@ -564,17 +807,26 @@ impl PartitionedHeap {
     }
 
     fn allocation_partition(&self) -> &Partition {
-        &self.partitions[self.allocation_index]
+        self.get_partition(self.allocation_index)
     }
 
     fn mut_allocation_partition(&mut self) -> &mut Partition {
-        &mut self.partitions[self.allocation_index]
+        self.mutable_partition(self.allocation_index)
     }
 
     pub fn is_allocation_partition(&self, index: usize) -> bool {
         self.allocation_index == index
     }
 
+    fn scan_for_free_partition(&mut self, requested_space: usize) -> Option<usize> {
+        for partition in self.partitions() {
+            if partition.free && partition.free_size() >= requested_space {
+                return Some(partition.index);
+            }
+        }
+        None
+    }
+
     unsafe fn allocate_free_partition<M: Memory>(
         &mut self,
         mem: &mut M,
@@ -585,40 +837,38 @@ impl PartitionedHeap {
         } else {
             null_mut()
         };
-        for partition in &mut self.partitions {
-            if partition.free && partition.free_size() >= requested_space {
-                debug_assert_eq!(partition.dynamic_size, 0);
-                partition.free = false;
-                debug_assert!(self.free_partitions > 0);
-                self.free_partitions -= 1;
-                if bitmap_address != null_mut() {
-                    partition.bitmap.assign(bitmap_address);
-                }
-                return partition;
-            }
+        let index = self.allocate_partition(mem, &|context| {
+            context.scan_for_free_partition(requested_space)
+        });
+        debug_assert!(self.free_partitions > 0);
+        self.free_partitions -= 1;
+        let partition = self.mutable_partition(index);
+        debug_assert_eq!(partition.dynamic_size, 0);
+        partition.free = false;
+        if bitmap_address != null_mut() {
+            partition.bitmap.assign(bitmap_address);
         }
-        rts_trap_with("Cannot grow memory");
+        partition
     }
 
-    fn check_occupied_size(&self) {
+    fn check_occupied_size(&mut self) {
         debug_assert_eq!(
-            self.partitions
-                .iter()
+            self.partitions()
                 .map(|partition| partition.static_size + partition.dynamic_size)
                 .sum::<usize>(),
             self.occupied_size().as_usize()
         );
     }
 
-    pub fn occupied_size(&self) -> Bytes<u32> {
-        Bytes((self.precomputed_heap_size + self.allocation_partition().dynamic_size) as u32)
+    pub fn occupied_size(&self) -> Bytes<usize> {
+        Bytes(self.precomputed_heap_size + self.allocation_partition().dynamic_size)
     }
 
     pub fn reclaimed_size(&self) -> Bytes {
         Bytes(self.reclaimed)
     }
 
-    pub fn increase_evacuated_size(&mut self, size: Words<u32>) {
+    pub fn increase_evacuated_size(&mut self, size: Words<usize>) {
         self.evacuated_size += size.to_bytes().as_usize();
     }
 
@@ -628,7 +878,7 @@ impl PartitionedHeap {
         Bytes(heap_size_without_evacuations as u64) + self.reclaimed_size()
     }
 
-    pub unsafe fn allocate<M: Memory>(&mut self, mem: &mut M, words: Words<u32>) -> Value {
+    pub unsafe fn allocate<M: Memory>(&mut self, mem: &mut M, words: Words<usize>) -> Value {
         let size = words.to_bytes().as_usize();
         if size <= PARTITION_SIZE {
             self.allocate_normal_object(mem, size)
@@ -660,7 +910,7 @@ impl PartitionedHeap {
         self.precomputed_heap_size += self.allocation_partition().dynamic_size;
 
         let new_partition = self.allocate_free_partition(mem, size);
-        mem.grow_memory(new_partition.end_address() as u64);
+        mem.grow_memory(new_partition.end_address());
         let heap_pointer = new_partition.dynamic_space_end();
         new_partition.dynamic_size += size;
         self.allocation_index = new_partition.index;
@@ -676,14 +926,16 @@ impl PartitionedHeap {
         let number_of_partitions = (size + PARTITION_SIZE - 1) / PARTITION_SIZE;
         debug_assert!(number_of_partitions > 0);
 
-        let first_index = self.find_large_space(number_of_partitions);
+        let first_index = self.allocate_partition(mem, &|context| {
+            context.scan_for_large_space(number_of_partitions)
+        });
         let last_index = first_index + number_of_partitions - 1;
 
         debug_assert!(self.free_partitions >= number_of_partitions);
         self.free_partitions -= number_of_partitions;
 
         let end_address = self.get_partition(last_index).end_address();
-        mem.grow_memory(end_address as u64);
+        mem.grow_memory(end_address);
         for index in first_index..last_index + 1 {
             let partition = self.mutable_partition(index);
             debug_assert!(partition.free);
@@ -707,19 +959,19 @@ impl PartitionedHeap {
         Value::from_ptr(first_partition.dynamic_space_start())
     }
 
-    unsafe fn find_large_space(&self, number_of_partitions: usize) -> usize {
+    unsafe fn scan_for_large_space(&self, number_of_partitions: usize) -> Option<usize> {
         let mut start_of_free_range = 0;
-        for index in 0..MAX_PARTITIONS {
+        for index in 0..self.number_of_partitions {
             // Invariant: [start_of_free_range .. index) contains only free partitions.
             if self.get_partition(index).is_completely_free() {
                 if index + 1 - start_of_free_range >= number_of_partitions {
-                    return start_of_free_range;
+                    return Some(start_of_free_range);
                 }
             } else {
                 start_of_free_range = index + 1;
             }
         }
-        rts_trap_with("Cannot grow memory");
+        None
     }
 
     unsafe fn occupied_partition_range(large_object: *mut Obj) -> Range<usize> {
@@ -737,7 +989,7 @@ impl PartitionedHeap {
 
     pub unsafe fn collect_large_objects(&mut self) {
         let mut index = 0;
-        while index < MAX_PARTITIONS {
+        while index < self.number_of_partitions {
             let partition = self.get_partition(index);
             if partition.has_large_content() {
                 debug_assert!(!partition.free);
@@ -772,20 +1024,28 @@ impl PartitionedHeap {
     #[inline(never)]
     unsafe fn mark_large_object(&mut self, object: *mut Obj) -> bool {
         let range = Self::occupied_partition_range(object);
-        if self.partitions[range.start].marked_size > 0 {
+        if self.get_partition(range.start).marked_size > 0 {
             return false;
         }
         for index in range.start..range.end - 1 {
-            self.partitions[index].marked_size = PARTITION_SIZE;
+            self.mutable_partition(index).marked_size = PARTITION_SIZE;
         }
         let object_size = block_size(object as usize).to_bytes().as_usize();
-        self.partitions[range.end - 1].marked_size = object_size % PARTITION_SIZE;
+        self.mutable_partition(range.end - 1).marked_size = object_size % PARTITION_SIZE;
         true
     }
 
     #[cfg(debug_assertions)]
     unsafe fn is_large_object_marked(&self, object: *mut Obj) -> bool {
         let range = Self::occupied_partition_range(object);
-        self.partitions[range.start].marked_size > 0
+        self.get_partition(range.start).marked_size > 0
     }
 }
+
+#[cfg(feature = "ic")]
+pub(crate) unsafe fn allocate_initial_memory(heap_base: Bytes<usize>) {
+    use crate::memory::ic::allocate_wasm_memory;
+
+    let memory_size = heap_base.next_multiple_of(PARTITION_SIZE);
+    allocate_wasm_memory(memory_size);
+}
diff --git a/rts/motoko-rts/src/gc/incremental/phases/evacuation_increment.rs b/rts/motoko-rts/src/gc/incremental/phases/evacuation_increment.rs
index 548f0f62b07..50f414ea031 100644
--- a/rts/motoko-rts/src/gc/incremental/phases/evacuation_increment.rs
+++ b/rts/motoko-rts/src/gc/incremental/phases/evacuation_increment.rs
@@ -18,11 +18,11 @@ pub struct EvacuationIncrement<'a, M: Memory> {
 }
 
 impl<'a, M: Memory + 'a> EvacuationIncrement<'a, M> {
-    pub unsafe fn start_phase(state: &mut State) {
+    pub unsafe fn start_phase(mem: &mut M, state: &mut State) {
         debug_assert!(state.iterator_state.is_none());
         let heap = &mut state.partitioned_heap;
         state.iterator_state = StableOption::Some(PartitionedHeapIterator::new(heap));
-        heap.plan_evacuations();
+        heap.plan_evacuations(mem);
     }
 
     pub unsafe fn complete_phase(state: &mut State) {
diff --git a/rts/motoko-rts/src/gc/incremental/phases/mark_increment.rs b/rts/motoko-rts/src/gc/incremental/phases/mark_increment.rs
index e71c8be3713..7ddc52c6088 100644
--- a/rts/motoko-rts/src/gc/incremental/phases/mark_increment.rs
+++ b/rts/motoko-rts/src/gc/incremental/phases/mark_increment.rs
@@ -1,3 +1,5 @@
+use motoko_rts_macros::{classical_persistence, enhanced_orthogonal_persistence};
+
 use crate::{
     gc::incremental::{
         array_slicing::slice_array,
@@ -67,7 +69,7 @@ impl<'a, M: Memory + 'a> MarkIncrement<'a, M> {
     }
 
     pub unsafe fn mark_roots(&mut self, roots: Roots) {
-        visit_roots(roots, self, |gc, field| {
+        visit_roots(roots, self.heap.base_address(), self, |gc, field| {
             gc.mark_object(*field);
             gc.time.tick();
         });
@@ -81,7 +83,12 @@ impl<'a, M: Memory + 'a> MarkIncrement<'a, M> {
         }
         loop {
             let value = self.mark_stack.pop();
+
+            #[enhanced_orthogonal_persistence]
             debug_assert!(value.is_non_null_ptr());
+            #[classical_persistence]
+            debug_assert!(value.is_ptr());
+
             if value == STACK_EMPTY {
                 self.complete_marking();
                 return;
@@ -98,7 +105,10 @@ impl<'a, M: Memory + 'a> MarkIncrement<'a, M> {
 
     pub unsafe fn mark_object(&mut self, value: Value) {
         self.time.tick();
+
+        #[enhanced_orthogonal_persistence]
         debug_assert_ne!(value, NULL_POINTER);
+
         debug_assert!((value.get_ptr() >= self.heap.base_address()));
         debug_assert!(!value.is_forwarded());
         let object = value.as_obj();
@@ -115,6 +125,7 @@ impl<'a, M: Memory + 'a> MarkIncrement<'a, M> {
             self,
             object,
             object.tag(),
+            self.heap.base_address(),
             |gc, field_address| {
                 let field_value = *field_address;
                 gc.mark_object(field_value);
@@ -124,7 +135,7 @@ impl<'a, M: Memory + 'a> MarkIncrement<'a, M> {
                 if (*array).header.tag >= TAG_ARRAY_SLICE_MIN {
                     gc.mark_stack.push(gc.mem, Value::from_ptr(array as usize));
                 }
-                gc.time.advance(1 + (length - slice_start) as usize);
+                gc.time.advance(1 + length - slice_start);
                 length
             },
         );
diff --git a/rts/motoko-rts/src/gc/incremental/phases/update_increment.rs b/rts/motoko-rts/src/gc/incremental/phases/update_increment.rs
index 57e0dc21c54..ee9159ed03c 100644
--- a/rts/motoko-rts/src/gc/incremental/phases/update_increment.rs
+++ b/rts/motoko-rts/src/gc/incremental/phases/update_increment.rs
@@ -1,3 +1,5 @@
+use motoko_rts_macros::enhanced_orthogonal_persistence;
+
 use crate::{
     gc::incremental::{
         array_slicing::slice_array,
@@ -50,9 +52,12 @@ impl<'a> UpdateIncrement<'a> {
     }
 
     pub unsafe fn update_roots(&mut self, roots: Roots) {
-        visit_roots(roots, self, |gc, field| {
+        visit_roots(roots, self.heap.base_address(), self, |gc, field| {
             let value = *field;
+
+            #[enhanced_orthogonal_persistence]
             debug_assert_ne!(value, NULL_POINTER);
+
             if value.is_forwarded() {
                 *field = value.forward_if_possible();
             }
@@ -108,13 +113,14 @@ impl<'a> UpdateIncrement<'a> {
                 self,
                 object,
                 object.tag(),
+                self.heap.base_address(),
                 |gc, field_address| {
                     *field_address = (*field_address).forward_if_possible();
                     gc.time.tick();
                 },
                 |gc, slice_start, array| {
                     let length = slice_array(array);
-                    gc.time.advance(1 + (length - slice_start) as usize);
+                    gc.time.advance(1 + length - slice_start);
                     length
                 },
             );
diff --git a/rts/motoko-rts/src/gc/incremental/roots.rs b/rts/motoko-rts/src/gc/incremental/roots.rs
index afef9cf7577..b0ad5a77d84 100644
--- a/rts/motoko-rts/src/gc/incremental/roots.rs
+++ b/rts/motoko-rts/src/gc/incremental/roots.rs
@@ -1,84 +1,47 @@
-use motoko_rts_macros::ic_mem_fn;
+#[enhanced_orthogonal_persistence]
+pub mod enhanced;
 
-use crate::{types::Value, visitor::is_non_null_pointer_field};
+#[classical_persistence]
+pub mod classical;
 
-/// An array referring to the static program variables, being
-/// - All canister variables.
-/// - Pooled shared objects.
-/// The array constitutes a GC root that is reinitialized on each canister upgrade.
-/// The scalar sentinel denotes an uninitialized root.
-#[cfg(feature = "ic")]
-static mut STATIC_VARIABLES: Value = crate::types::NULL_POINTER;
+use motoko_rts_macros::{classical_persistence, enhanced_orthogonal_persistence};
 
-/// Sanity check for the variable initialization: The variables must be initialized
-/// in increasing order and may only read precedingly initialized variables.
-#[cfg(feature = "ic")]
-static mut INITIALIZED_VARIABLES: u32 = 0;
+use crate::types::Value;
+
+#[enhanced_orthogonal_persistence]
+pub type Roots = self::enhanced::Roots;
 
-/// GC root set.
-pub type Roots = [*mut Value; 6];
+#[classical_persistence]
+pub type Roots = self::classical::Roots;
 
 #[cfg(feature = "ic")]
+#[enhanced_orthogonal_persistence]
 pub unsafe fn root_set() -> Roots {
-    use crate::{
-        continuation_table::continuation_table_loc,
-        persistence::{stable_actor_location, stable_type_descriptor},
-        region::region0_get_ptr_loc,
-    };
-    [
-        static_variables_location(),
-        continuation_table_loc(),
-        stable_actor_location(),
-        stable_type_descriptor().candid_data_location(),
-        stable_type_descriptor().type_offsets_location(),
-        region0_get_ptr_loc(),
-    ]
+    self::enhanced::root_set()
 }
 
+#[cfg(feature = "ic")]
+#[classical_persistence]
+pub unsafe fn root_set() -> Roots {
+    self::classical::root_set()
+}
+
+#[enhanced_orthogonal_persistence]
 pub unsafe fn visit_roots<C, V: Fn(&mut C, *mut Value)>(
     roots: Roots,
+    _heap_base: usize, // Only used with classical persistence.
     context: &mut C,
     visit_field: V,
 ) {
-    for location in roots {
-        if is_non_null_pointer_field(location) {
-            visit_field(context, location);
-        }
-    }
+    self::enhanced::visit_roots(roots, context, visit_field);
 }
 
-#[cfg(feature = "ic")]
-unsafe fn static_variables_location() -> *mut Value {
-    &mut STATIC_VARIABLES as *mut Value
-}
-
-#[ic_mem_fn(ic_only)]
-pub unsafe fn initialize_static_variables<M: Memory>(mem: &mut M, amount: u32) {
-    use super::barriers::write_with_barrier;
-    use crate::memory::alloc_array;
-    use crate::types::{NULL_POINTER, TAG_ARRAY_M};
-
-    let variables = alloc_array(mem, TAG_ARRAY_M, amount);
-    let array = variables.as_array();
-    for index in 0..amount {
-        array.initialize(index, NULL_POINTER, mem);
-    }
-    let location = &mut STATIC_VARIABLES as *mut Value;
-    write_with_barrier(mem, location, variables);
-}
-
-#[no_mangle]
-#[cfg(feature = "ic")]
-pub unsafe extern "C" fn get_static_variable(index: u32) -> Value {
-    debug_assert!(STATIC_VARIABLES.is_non_null_ptr());
-    debug_assert!(index < INITIALIZED_VARIABLES);
-    STATIC_VARIABLES.as_array().get(index)
-}
-
-#[ic_mem_fn(ic_only)]
-pub unsafe fn set_static_variable(mem: &mut M, index: u32, value: Value) {
-    debug_assert!(STATIC_VARIABLES.is_non_null_ptr());
-    debug_assert!(index == INITIALIZED_VARIABLES);
-    STATIC_VARIABLES.as_array().set(index, value, mem);
-    INITIALIZED_VARIABLES += 1;
+#[classical_persistence]
+pub unsafe fn visit_roots<C, V: Fn(&mut C, *mut Value)>(
+    roots: Roots,
+    heap_base: usize, // Only used with classical persistence.
+    context: &mut C,
+    visit_field: V,
+) {
+    self::classical::visit_roots(roots, heap_base, context, visit_field);
 }
diff --git a/rts/motoko-rts/src/gc/incremental/roots/classical.rs b/rts/motoko-rts/src/gc/incremental/roots/classical.rs
new file mode 100644
index 00000000000..d6313822fb1
--- /dev/null
+++ b/rts/motoko-rts/src/gc/incremental/roots/classical.rs
@@ -0,0 +1,75 @@
+use crate::types::Value;
+use crate::visitor::classical::pointer_to_dynamic_heap;
+
+#[derive(Clone, Copy)]
+pub struct Roots {
+    pub static_roots: Value,
+    pub continuation_table_location: *mut Value,
+    pub region0_ptr_location: *mut Value,
+    // If new roots are added in future, extend `visit_roots()`.
+}
+
+#[cfg(feature = "ic")]
+pub unsafe fn root_set() -> Roots {
+    use crate::memory::ic;
+    Roots {
+        static_roots: ic::get_static_roots(),
+        continuation_table_location: crate::continuation_table::continuation_table_loc(),
+        region0_ptr_location: crate::region::region0_get_ptr_loc(),
+    }
+}
+
+pub unsafe fn visit_roots<C, V: Fn(&mut C, *mut Value)>(
+    roots: Roots,
+    heap_base: usize,
+    context: &mut C,
+    visit_field: V,
+) {
+    visit_static_roots(roots.static_roots, heap_base, context, &visit_field);
+    visit_continuation_table(
+        roots.continuation_table_location,
+        heap_base,
+        context,
+        &visit_field,
+    );
+    visit_region0_ptr(roots.region0_ptr_location, heap_base, context, &visit_field);
+}
+
+unsafe fn visit_static_roots<C, V: Fn(&mut C, *mut Value)>(
+    static_roots: Value,
+    heap_base: usize,
+    context: &mut C,
+    visit_field: &V,
+) {
+    let root_array = static_roots.as_array();
+    for index in 0..root_array.len() {
+        let mutbox = root_array.get(index).as_mutbox();
+        debug_assert!((mutbox as usize) < heap_base);
+        let field = &mut (*mutbox).field;
+        if pointer_to_dynamic_heap(field, heap_base) {
+            visit_field(context, field);
+        }
+    }
+}
+
+unsafe fn visit_continuation_table<C, V: Fn(&mut C, *mut Value)>(
+    continuation_table_location: *mut Value,
+    heap_base: usize,
+    context: &mut C,
+    visit_field: &V,
+) {
+    if pointer_to_dynamic_heap(continuation_table_location, heap_base) {
+        visit_field(context, continuation_table_location);
+    }
+}
+
+unsafe fn visit_region0_ptr<C, V: Fn(&mut C, *mut Value)>(
+    region0_ptr_location: *mut Value,
+    heap_base: usize,
+    context: &mut C,
+    visit_field: &V,
+) {
+    if pointer_to_dynamic_heap(region0_ptr_location, heap_base) {
+        visit_field(context, region0_ptr_location);
+    }
+}
diff --git a/rts/motoko-rts/src/gc/incremental/roots/enhanced.rs b/rts/motoko-rts/src/gc/incremental/roots/enhanced.rs
new file mode 100644
index 00000000000..a34feee9406
--- /dev/null
+++ b/rts/motoko-rts/src/gc/incremental/roots/enhanced.rs
@@ -0,0 +1,89 @@
+use motoko_rts_macros::ic_mem_fn;
+
+use crate::types::Value;
+use crate::visitor::enhanced::is_non_null_pointer_field;
+
+/// An array referring to the static program variables, being
+/// - All canister variables.
+/// - Pooled shared objects.
+/// The array constitutes a GC root that is reinitialized on each canister upgrade.
+/// The scalar sentinel denotes an uninitialized root.
+#[cfg(feature = "ic")]
+static mut STATIC_VARIABLES: Value = crate::types::NULL_POINTER;
+
+/// Sanity check for the variable initialization: The variables must be initialized
+/// in increasing order and may only read previously initialized variables.
+#[cfg(feature = "ic")]
+static mut INITIALIZED_VARIABLES: usize = 0;
+
+/// GC root set.
+pub type Roots = [*mut Value; 6];
+
+pub unsafe fn visit_roots<C, V: Fn(&mut C, *mut Value)>(
+    roots: Roots,
+    context: &mut C,
+    visit_field: V,
+) {
+    for location in roots {
+        if is_non_null_pointer_field(location) {
+            visit_field(context, location);
+        }
+    }
+}
+
+#[cfg(feature = "ic")]
+pub unsafe fn root_set() -> Roots {
+    use crate::{
+        continuation_table::continuation_table_loc,
+        persistence::{stable_actor_location, stable_type_descriptor},
+        region::region0_get_ptr_loc,
+    };
+    [
+        static_variables_location(),
+        continuation_table_loc(),
+        stable_actor_location(),
+        stable_type_descriptor().candid_data_location(),
+        stable_type_descriptor().type_offsets_location(),
+        region0_get_ptr_loc(),
+    ]
+}
+
+#[cfg(feature = "ic")]
+unsafe fn static_variables_location() -> *mut Value {
+    &mut STATIC_VARIABLES as *mut Value
+}
+
+#[ic_mem_fn(ic_only)]
+pub unsafe fn initialize_static_variables<M: Memory>(mem: &mut M, amount: usize) {
+    use crate::barriers::write_with_barrier;
+    use crate::memory::alloc_array;
+    use crate::types::{NULL_POINTER, TAG_ARRAY_M};
+
+    let variables = alloc_array(mem, TAG_ARRAY_M, amount);
+    let array = variables.as_array();
+    for index in 0..amount {
+        array.initialize(index, NULL_POINTER, mem);
+    }
+    let location = &mut STATIC_VARIABLES as *mut Value;
+    write_with_barrier(mem, location, variables);
+}
+
+#[no_mangle]
+#[cfg(feature = "ic")]
+pub unsafe extern "C" fn get_static_variable(index: usize) -> Value {
+    debug_assert!(STATIC_VARIABLES.is_non_null_ptr());
+    debug_assert!(index < INITIALIZED_VARIABLES);
+    STATIC_VARIABLES.as_array().get(index)
+}
+
+#[ic_mem_fn(ic_only)]
+pub unsafe fn set_static_variable<M: Memory>(
+    mem: &mut M,
+    index: usize,
+    value: Value,
+) {
+    debug_assert!(STATIC_VARIABLES.is_non_null_ptr());
+    debug_assert!(index == INITIALIZED_VARIABLES);
+    STATIC_VARIABLES.as_array().set(index, value, mem);
+    INITIALIZED_VARIABLES += 1;
+}
diff --git a/rts/motoko-rts/src/gc/incremental/sanity_checks.rs b/rts/motoko-rts/src/gc/incremental/sanity_checks.rs
index 1cbe922a0a4..aaa12dd1e0e 100644
--- a/rts/motoko-rts/src/gc/incremental/sanity_checks.rs
+++ b/rts/motoko-rts/src/gc/incremental/sanity_checks.rs
@@ -1,14 +1,12 @@
 //! Incremental GC sanity checker
 #![allow(dead_code)]
 
-mod remembered_set;
-
 use crate::gc::incremental::partitioned_heap::PARTITION_SIZE;
 use crate::memory::Memory;
 use crate::types::*;
 use crate::visitor::visit_pointer_fields;
 
-use self::remembered_set::RememberedSet;
+use crate::gc::remembered_set::RememberedSet;
 
 use super::mark_stack::{MarkStack, STACK_EMPTY};
 use super::partitioned_heap::PartitionedHeap;
@@ -65,7 +63,7 @@ impl<'a, M: Memory> MemoryChecker<'a, M> {
     }
 
     unsafe fn check_roots(&mut self) {
-        visit_roots(self.roots, self, |gc, field| {
+        visit_roots(self.roots, self.heap.base_address(), self, |gc, field| {
             gc.check_object(*field);
         });
     }
@@ -100,13 +98,10 @@ impl<'a, M: Memory> MemoryChecker<'a, M> {
             self,
             object,
             object.tag(),
+            0,
             |gc, field_address| {
                 let value = *field_address;
-                if value.is_non_null_ptr() {
-                    gc.check_object(value);
-                } else {
-                    gc.check_object_header(value);
-                }
+                gc.check_object(value);
             },
             |_, _, array| array.len(),
         );
diff --git a/rts/motoko-rts/src/gc/incremental/scheduling.rs b/rts/motoko-rts/src/gc/incremental/scheduling.rs
new file mode 100644
index 00000000000..ef081a94e38
--- /dev/null
+++ b/rts/motoko-rts/src/gc/incremental/scheduling.rs
@@ -0,0 +1,105 @@
+//! Heuristics for GC scheduling.
+//! Tuned for 64-bit main memory with unknown IC memory capacity.
+//!
+//! Distinction of three scheduling modes:
+//! * Critical: The heap exceeds 80% of the memory capacity (using capacity probing):
+//!   - Schedule a GC start with a frequency of 1% of the heap growth.
+//! * Medium: The heap exceeds 50% of the memory capacity:
+//!   - Schedule a GC start with a frequency of 35% of the heap growth.
+//! * Low: The heap is below 50% of the memory capacity.
+//!   - Schedule a GC start with a frequency of 65% of the heap growth.
+//!
+//! A heuristics for capacity probing is used to determine the minimum amount of memory capacity.
+//! This is necessary because the IC does not provide runtime information about the implemented
+//! Wasm memory capacity in 64-bit. This capacity may also increase over time with newer IC versions.
+
+use crate::gc::incremental::{get_incremental_gc_state, partitioned_heap::PARTITION_SIZE};
+use crate::memory::ic::partitioned_memory::{get_heap_size, get_total_allocations};
+use crate::types::Bytes;
+use motoko_rts_macros::{classical_persistence, enhanced_orthogonal_persistence};
+
+struct HeapThresholds {
+    critical_heap_limit: Bytes<usize>,
+    medium_heap_limit: Bytes<usize>,
+}
+
+#[classical_persistence]
+impl HeapThresholds {
+    unsafe fn get() -> HeapThresholds {
+        use crate::constants::{GB, MB};
+
+        const CRITICAL_HEAP_LIMIT: Bytes<usize> = Bytes(2 * GB + 256 * MB);
+        const MEDIUM_HEAP_LIMIT: Bytes<usize> = Bytes(1 * GB);
+        HeapThresholds {
+            critical_heap_limit: CRITICAL_HEAP_LIMIT,
+            medium_heap_limit: MEDIUM_HEAP_LIMIT,
+        }
+    }
+}
+
+#[enhanced_orthogonal_persistence]
+impl HeapThresholds {
+    /// Heuristics: Determine the threshold values of the heap size to schedule a new GC start.
+    /// Note:
+    /// As the maximum Wasm memory capacity supported by the IC is not known and can be increased
+    /// over time, we probe the memory extension when the free space seems to be critically low.
+    /// This is to avoid unnecessary heavy GC scheduling when approaching a supposed memory limit
+    /// that can actually be extended.
+    unsafe fn get() -> HeapThresholds {
+        use crate::memory::ic::enhanced_memory::{minimum_memory_capacity, probe_wasm_memory};
+
+        let thresholds = HeapThresholds::get_without_probing();
+        let heap_size = get_heap_size();
+        // Only if the heap size seems to be critically low, try to expand the Wasm memory beyond the
+        // assumed current available memory.
+        if heap_size > thresholds.critical_heap_limit
+            && probe_wasm_memory(minimum_memory_capacity() + Bytes(1))
+        {
+            // Obtain the extended heap thresholds.
+            HeapThresholds::get_without_probing()
+        } else {
+            thresholds
+        }
+    }
+
+    /// Obtain the heap thresholds without memory probing:
+    /// The critical limit is 80% of the currently known memory size.
+    /// The medium limit is 50% of the currently known memory size.
+    unsafe fn get_without_probing() -> HeapThresholds {
+        use crate::memory::ic::enhanced_memory::minimum_memory_capacity;
+
+        let available_memory = minimum_memory_capacity().as_usize();
+        let critical_heap_limit = Bytes(available_memory / 10 * 8); // 80%
+        let medium_heap_limit: Bytes = Bytes(available_memory / 2); // 50%
+        HeapThresholds {
+            critical_heap_limit,
+            medium_heap_limit,
+        }
+    }
+}
+
+/// Determine whether a new GC run should be started based on the heap growth since
+/// the last GC completion.
+pub unsafe fn should_start_gc() -> bool {
+    let heap_size = get_heap_size();
+
+    const CRITICAL_GROWTH_THRESHOLD: f64 = 0.01;
+    const MEDIUM_GROWTH_THRESHOLD: f64 = 0.35;
+    const LOW_GROWTH_THRESHOLD: f64 = 0.65;
+
+    let heap_thresholds = HeapThresholds::get();
+    let growth_threshold = if heap_size > heap_thresholds.critical_heap_limit {
+        CRITICAL_GROWTH_THRESHOLD
+    } else if heap_size > heap_thresholds.medium_heap_limit {
+        MEDIUM_GROWTH_THRESHOLD
+    } else {
+        LOW_GROWTH_THRESHOLD
+    };
+
+    let current_allocations = get_total_allocations();
+    let state = get_incremental_gc_state();
+    debug_assert!(current_allocations >= state.statistics.last_allocations);
+    let absolute_growth = current_allocations - state.statistics.last_allocations;
+    let relative_growth = absolute_growth.0 as f64 / heap_size.as_usize() as f64;
+    relative_growth > growth_threshold && heap_size.as_usize() >= PARTITION_SIZE
+}
diff --git a/rts/motoko-rts/src/gc/incremental/sort.rs b/rts/motoko-rts/src/gc/incremental/sort.rs
index 34d4dfa76e5..ea4e2e8097e 100644
--- a/rts/motoko-rts/src/gc/incremental/sort.rs
+++ b/rts/motoko-rts/src/gc/incremental/sort.rs
@@ -2,32 +2,42 @@ use core::cmp::Ordering;
 
 /// Simple in-place sort algorithm that does not allocate additional memory.
 /// Based on quicksort. Average runtime costs of `O(n*log(n))`.
-pub fn sort<Compare: Fn(usize, usize) -> Ordering>(array: &mut [usize], compare: &Compare) {
-    if array.len() > 1 {
-        quicksort(array, compare);
+pub unsafe fn sort<Compare: Fn(usize, usize) -> Ordering>(
+    array: *mut usize,
+    length: usize,
+    compare: &Compare,
+) {
+    if length > 1 {
+        quicksort(array, 0, length - 1, compare);
     }
 }
 
 /// Special version of quicksort by Niklaus Wirth: https://people.inf.ethz.ch/wirth/AD.pdf, pages 65/66.
-fn quicksort<Compare: Fn(usize, usize) -> Ordering>(array: &mut [usize], compare: &Compare) {
+/// Sorting `array[start..end+1]`, i.e. `start` and `end` are inclusive indices.
+unsafe fn quicksort<Compare: Fn(usize, usize) -> Ordering>(
+    array: *mut usize,
+    start: usize,
+    end: usize,
+    compare: &Compare,
+) {
     // Require at least two elements to avoid the unsigned integer underflow of `backward`.
-    debug_assert!(array.len() > 1);
+    debug_assert!(start < end);
     // Take the middle element as pivot to optimize for the case of a sorted or nearly sorted array.
-    let pivot = array[array.len() / 2];
-    let mut forward = 0;
-    let mut backward = array.len() - 1;
+    let pivot = *array.add((start + end + 1) / 2);
+    let mut forward = start;
+    let mut backward = end;
     loop {
-        while compare(array[forward], pivot) == Ordering::Less {
+        while compare(*array.add(forward), pivot) == Ordering::Less {
             forward += 1;
         }
-        while compare(array[backward], pivot) == Ordering::Greater {
+        while compare(*array.add(backward), pivot) == Ordering::Greater {
             backward -= 1;
         }
         if forward <= backward {
             // Cannot use `array.swap()` since it imports `memmove` in debug build.
-            let temporary = array[forward];
-            array[forward] = array[backward];
-            array[backward] = temporary;
+            let temporary = *array.add(forward);
+            *array.add(forward) = *array.add(backward);
+            *array.add(backward) = temporary;
             forward += 1;
             backward -= 1;
         }
@@ -38,10 +48,10 @@ fn quicksort<Compare: Fn(usize, usize) -> Ordering>(array: &mut [usize], compare
     // Invariant after partitioning: `forward > backward`. Therefore:
     // * The left partition ends at `backward`.
     // * The right partition starts at `forward`.
-    if backward > 0 {
-        quicksort(&mut array[..backward + 1], compare);
+    if backward > start {
+        quicksort(array, start, backward, compare);
     }
-    if forward < array.len() - 1 {
-        quicksort(&mut array[forward..], compare);
+    if forward < end {
+        quicksort(array, forward, end, compare);
     }
 }
diff --git a/rts/motoko-rts/src/gc/incremental/time.rs b/rts/motoko-rts/src/gc/incremental/time.rs
index bc91a01cfdf..134d415e2ea 100644
--- a/rts/motoko-rts/src/gc/incremental/time.rs
+++ b/rts/motoko-rts/src/gc/incremental/time.rs
@@ -10,6 +10,7 @@ impl BoundedTime {
         BoundedTime { steps: 0, limit }
     }
 
+    // TODO: Saturating add is probably no longer needed in 64-bit (if that makes it faster).
     pub fn tick(&mut self) {
         self.steps = usize::saturating_add(self.steps, 1);
     }
diff --git a/rts/motoko-rts/src/gc/mark_compact.rs b/rts/motoko-rts/src/gc/mark_compact.rs
new file mode 100644
index 00000000000..c9d894f6c7f
--- /dev/null
+++ b/rts/motoko-rts/src/gc/mark_compact.rs
@@ -0,0 +1,303 @@
+//! Implements threaded compaction as described in "High-Performance Garbage Collection for
+//! Memory-Constrained Environments" section 5.1.2, which is an improved version of the original
+//! threaded compaction algorithm described in The Garbage Collection Handbook section 3.3.
+
+pub mod bitmap;
+pub mod mark_stack;
+
+use bitmap::{alloc_bitmap, free_bitmap, get_bit, iter_bits, set_bit, BITMAP_ITER_END};
+use mark_stack::{alloc_mark_stack, free_mark_stack, pop_mark_stack, push_mark_stack};
+
+use crate::constants::WORD_SIZE;
+use crate::mem_utils::memcpy_words;
+use crate::memory::Memory;
+use crate::types::*;
+use crate::visitor::{classical::pointer_to_dynamic_heap, visit_pointer_fields};
+
+use motoko_rts_macros::ic_mem_fn;
+
+// Only designed for 32-bit.
+const _: () = assert!(core::mem::size_of::<usize>() == core::mem::size_of::<u32>());
+
+#[no_mangle]
+#[cfg(feature = "ic")]
+pub unsafe extern "C" fn initialize_compacting_gc() {
+    crate::memory::ic::linear_memory::initialize();
+}
+
+#[ic_mem_fn(ic_only)]
+unsafe fn schedule_compacting_gc<M: Memory>(mem: &mut M) {
+    // 512 MiB slack for mark stack + allocation area for the next message
+    let slack: u64 = 512 * 1024 * 1024;
+    let heap_size_bytes: u64 =
+        crate::constants::WASM32_HEAP_SIZE.as_usize() as u64 * WORD_SIZE as u64;
+    // Larger than necessary to keep things simple
+    let max_bitmap_size_bytes = heap_size_bytes / 32;
+    // NB. `max_live` is evaluated in compile time to a constant
+    let max_live: Bytes<u64> = Bytes(heap_size_bytes - slack - max_bitmap_size_bytes);
+
+    if super::should_do_gc(max_live) {
+        compacting_gc(mem);
+    }
+}
+
+#[ic_mem_fn(ic_only)]
+unsafe fn compacting_gc<M: Memory>(mem: &mut M) {
+    use crate::memory::ic::{self, linear_memory};
+
+    compacting_gc_internal(
+        mem,
+        ic::get_aligned_heap_base(),
+        // get_hp
+        || linear_memory::get_hp_unskewed(),
+        // set_hp
+        |hp| linear_memory::set_hp_unskewed(hp),
+        ic::get_static_roots(),
+        crate::continuation_table::continuation_table_loc(),
+        crate::region::region0_get_ptr_loc(),
+        // note_live_size
+        |live_size| linear_memory::MAX_LIVE = ::core::cmp::max(linear_memory::MAX_LIVE, live_size),
+        // note_reclaimed
+        |reclaimed| linear_memory::RECLAIMED += Bytes(reclaimed.as_usize() as u64),
+    );
+
+    linear_memory::LAST_HP = linear_memory::get_hp_unskewed();
+}
+
+pub unsafe fn compacting_gc_internal<
+    M: Memory,
+    GetHp: Fn() -> usize,
+    SetHp: Fn(usize),
+    NoteLiveSize: Fn(Bytes<usize>),
+    NoteReclaimed: Fn(Bytes<usize>),
+>(
+    mem: &mut M,
+    heap_base: usize,
+    get_hp: GetHp,
+    set_hp: SetHp,
+    static_roots: Value,
+    continuation_table_ptr_loc: *mut Value,
+    region0_ptr_loc: *mut Value,
+    note_live_size: NoteLiveSize,
+    note_reclaimed: NoteReclaimed,
+) {
+    let old_hp = get_hp();
+
+    assert_eq!(heap_base % 32, 0);
+
+    mark_compact(
+        mem,
+        set_hp,
+        heap_base,
+        old_hp,
+        static_roots,
+        continuation_table_ptr_loc,
+        region0_ptr_loc,
+    );
+
+    let reclaimed = old_hp - get_hp();
+    note_reclaimed(Bytes(reclaimed));
+
+    let live = get_hp() - heap_base;
+    note_live_size(Bytes(live));
+}
+
+unsafe fn mark_compact<M: Memory, SetHp: Fn(usize)>(
+    mem: &mut M,
+    set_hp: SetHp,
+    heap_base: usize,
+    heap_end: usize,
+    static_roots: Value,
+    continuation_table_ptr_loc: *mut Value,
+    region0_ptr_loc: *mut Value,
+) {
+    let mem_size = Bytes(heap_end - heap_base);
+
+    alloc_bitmap(mem, mem_size, heap_base / WORD_SIZE);
+    alloc_mark_stack(mem);
+
+    mark_static_roots(mem, static_roots, heap_base);
+
+    if (*continuation_table_ptr_loc).is_ptr() {
+        mark_object(mem, *continuation_table_ptr_loc);
+        // Similar to `mark_root_mutbox_fields`, `continuation_table_ptr_loc` is in static heap so
+        // it will be readable when we unthread the continuation table
+        thread(continuation_table_ptr_loc);
+    }
+
+    if (*region0_ptr_loc).is_ptr() {
+        mark_object(mem, *region0_ptr_loc);
+        thread(region0_ptr_loc);
+    }
+
+    mark_stack(mem, heap_base);
+
+    update_refs(set_hp, heap_base);
+
+    free_mark_stack();
+    free_bitmap();
+}
+
+unsafe fn mark_static_roots<M: Memory>(mem: &mut M, static_roots: Value, heap_base: usize) {
+    let root_array = static_roots.as_array();
+
+    // Static objects are not in the dynamic heap so don't need marking.
+    for i in 0..root_array.len() {
+        let obj = root_array.get(i).as_obj();
+        // Root array should only have pointers to other static MutBoxes
+        debug_assert_eq!(obj.tag(), TAG_MUTBOX); // check tag
+        debug_assert!((obj as usize) < heap_base); // check that MutBox is static
+        mark_root_mutbox_fields(mem, obj as *mut MutBox, heap_base);
+    }
+}
+
+unsafe fn mark_object<M: Memory>(mem: &mut M, obj: Value) {
+    let obj_tag = obj.tag();
+    let obj = obj.get_ptr();
+
+    // Check object alignment to avoid undefined behavior. See also static_checks module.
+    debug_assert_eq!(obj % WORD_SIZE, 0);
+
+    let obj_idx = obj / WORD_SIZE;
+
+    if get_bit(obj_idx) {
+        // Already marked
+        return;
+    }
+
+    set_bit(obj_idx);
+    push_mark_stack(mem, obj, obj_tag);
+}
+
+unsafe fn mark_stack<M: Memory>(mem: &mut M, heap_base: usize) {
+    while let Some((obj, tag)) = pop_mark_stack() {
+        mark_fields(mem, obj as *mut Obj, tag, heap_base)
+    }
+}
+
+unsafe fn mark_fields<M: Memory>(mem: &mut M, obj: *mut Obj, obj_tag: Tag, heap_base: usize) {
+    visit_pointer_fields(
+        mem,
+        obj,
+        obj_tag,
+        heap_base,
+        |mem, field_addr| {
+            let field_value = *field_addr;
+            mark_object(mem, field_value);
+
+            // Thread if backwards or self pointer
+            if field_value.get_ptr() <= obj as usize {
+                thread(field_addr);
+            }
+        },
+        |mem, slice_start, arr| {
+            const SLICE_INCREMENT: usize = 127;
+            debug_assert!(SLICE_INCREMENT >= TAG_ARRAY_SLICE_MIN);
+            if arr.len() - slice_start > SLICE_INCREMENT {
+                let new_start = slice_start + SLICE_INCREMENT;
+                // push an entire (suffix) array slice
+                push_mark_stack(mem, arr as usize, new_start);
+                new_start
+            } else {
+                arr.len()
+            }
+        },
+    );
+}
+
+/// Specialized version of `mark_fields` for root `MutBox`es.
+unsafe fn mark_root_mutbox_fields<M: Memory>(mem: &mut M, mutbox: *mut MutBox, heap_base: usize) {
+    let field_addr = &mut (*mutbox).field;
+    if pointer_to_dynamic_heap(field_addr, heap_base) {
+        mark_object(mem, *field_addr);
+        // It's OK to thread forward pointers here as the static objects won't be moved, so we will
+        // be able to unthread objects pointed by these fields later.
+        thread(field_addr);
+    }
+}
+
+/// Linearly scan the heap, for each live object:
+///
+/// - Mark step threads all backwards pointers and pointers from roots, so unthread to update those
+///   pointers to the objects new location.
+///
+/// - Move the object
+///
+/// - Thread forward pointers of the object
+///
+unsafe fn update_refs<SetHp: Fn(usize)>(set_hp: SetHp, heap_base: usize) {
+    let mut free = heap_base;
+
+    let mut bitmap_iter = iter_bits();
+    let mut bit = bitmap_iter.next();
+    while bit != BITMAP_ITER_END {
+        let p = (bit * WORD_SIZE) as *mut Obj;
+        let p_new = free;
+
+        // Update backwards references to the object's new location and restore object header
+        unthread(p, p_new);
+
+        // Move the object
+        let p_size_words = block_size(p as usize);
+        if p_new as usize != p as usize {
+            memcpy_words(p_new as usize, p as usize, p_size_words);
+
+            debug_assert!(p_size_words.as_usize() > size_of::<Obj>().as_usize());
+            // Update forwarding pointer
+            let new_obj = p_new as *mut Obj;
+            debug_assert!(new_obj.tag() >= TAG_OBJECT && new_obj.tag() <= TAG_NULL);
+        }
+
+        free += p_size_words.to_bytes().as_usize();
+
+        // Thread forward pointers of the object
+        thread_fwd_pointers(p_new as *mut Obj, heap_base);
+
+        bit = bitmap_iter.next();
+    }
+
+    set_hp(free);
+}
+
+/// Thread forward pointers in object
+unsafe fn thread_fwd_pointers(obj: *mut Obj, heap_base: usize) {
+    visit_pointer_fields(
+        &mut (),
+        obj,
+        obj.tag(),
+        heap_base,
+        |_, field_addr| {
+            if (*field_addr).get_ptr() > obj as usize {
+                thread(field_addr)
+            }
+        },
+        |_, _, arr| arr.len(),
+    );
+}
+
+/// Thread a pointer field
+unsafe fn thread(field: *mut Value) {
+    // Store pointed object's header in the field, field address in the pointed object's header
+    let pointed = (*field).as_obj();
+    let pointed_header = pointed.tag();
+    *field = Value::from_raw(pointed_header);
+    (*pointed).tag = field as usize;
+}
+
+/// Unthread all references at given header, replacing with `new_loc`. Restores object header.
+unsafe fn unthread(obj: *mut Obj, new_loc: usize) {
+    let mut header = obj.tag();
+
+    // All objects and fields are word-aligned, and tags have the lowest bit set, so use the lowest
+    // bit to distinguish a header (tag) from a field address.
+    while header & 0b1 == 0 {
+        let tmp = (header as *const Obj).tag();
+        (*(header as *mut Value)) = Value::from_ptr(new_loc);
+        header = tmp;
+    }
+
+    // At the end of the chain is the original header for the object
+    debug_assert!(header >= TAG_OBJECT && header <= TAG_NULL);
+
+    (*obj).tag = header;
+}
diff --git a/rts/motoko-rts/src/gc/mark_compact/bitmap.rs b/rts/motoko-rts/src/gc/mark_compact/bitmap.rs
new file mode 100644
index 00000000000..ba7ed7483b3
--- /dev/null
+++ b/rts/motoko-rts/src/gc/mark_compact/bitmap.rs
@@ -0,0 +1,209 @@
+use crate::gc::mark_compact::TAG_BLOB_B;
+use crate::mem_utils::memzero;
+use crate::memory::{alloc_blob, Memory};
+use crate::types::{size_of, Blob, Bytes, Obj};
+
+/* How the Wasm-heap maps to the bitmap
+
+  +---- RTS stack ----+---- Motoko statics ----+---- Dynamic heap ------+ Heap limit
+                  (prefix words)                   bitmap lives here -> | BM |
+                                                                       /      \
+                                                                      /        \
+                                       each bit represents a word -> /...bits...\
+                                          in the dynamic heap
+
+## Marking with absolute addresses
+
+When marking, we need to map an address to a bit in the bitmap. Internally the
+bitmap is kept in a (non-moving) blob after the dynamic heap (DH). To
+efficiently mark the right bit in the bitmap, we maintain a pointer that points
+_before the start of the bitmap_ such that `address / 8` and `address % 8`
+will address the right bit:
+
+
+       +---- BITMAP_FORBIDDEN_PTR         +---- BITMAP_PTR
+       v                                  v
+       ,          (forbidden)             ,...................... bitmap ..........~~....,
+       |   heap_prefix_words / 8 bytes    |               heap_size / 32 bytes           |
+       |   get_bitmap_forbidden_size()    |                   BITMAP_SIZE                |
+       ^                                  ^                                              ^
+      /           ^^^^^^^^^^^              \                                             |
+     /            corresponds               \                 ^^^^^^^^^^^               /
+    /                                        \                corresponds              /
+   /                                          \                                       /
+  +---- Rts stack ----+---- Motoko statics ----+--------- Dynamic heap ----------~~--+
+                                               !
+                                               ! 32-byte aligned
+
+
+Debug assertions guard the forbidden bytes from access, as this area potentially
+overlaps with the Motoko dynamic heap.
+
+## The alignment caveat
+
+For this scheme to work, it is essential that the start of the DH is an address that
+is divisible by 32 (`heap_prefix_words % 8 == 0`). Otherwise the `address / 8`
+and `address % 8` operations on the DH's starting address will not yield the
+least significant bit in the BM.
+
+## Example calculation
+
+Assume the DH is at 0x80000. Assume heap limit being at 0xB0000. Then the BM thus
+could be placed at 0xB0008. Since the heap_prefix_words is 0x20000,
+BITMAP_FORBIDDEN_PTR = 0xB0008 - 0x20000 / 8 = 0xAC008.
+
+Now let's mark the address 0x80548 in the DH. Its absolute word number is 0x20152.
+The `(0x20152 / 8, 0x20152 % 8)`-rule gives a bit position 2 with byte offset 0x402A,
+thus we mark bit 2 in byte 0xAC008 + 0x402A = 0xB0032, which is in the BM.
+
+ */
+
+// Only designed for 32-bit.
+const _: () = assert!(core::mem::size_of::<usize>() == core::mem::size_of::<u32>());
+
+/// Current bitmap
+static mut BITMAP_FORBIDDEN_PTR: *mut u8 = core::ptr::null_mut();
+static mut BITMAP_PTR: *mut u8 = core::ptr::null_mut();
+static mut BITMAP_SIZE: usize = 0;
+
+unsafe fn get_bitmap_forbidden_size() -> usize {
+    BITMAP_PTR as usize - BITMAP_FORBIDDEN_PTR as usize
+}
+
+pub unsafe fn alloc_bitmap<M: Memory>(
+    mem: &mut M,
+    heap_size: Bytes<usize>,
+    heap_prefix_words: usize,
+) {
+    // See Note "How the Wasm-heap maps to the bitmap" above
+    debug_assert_eq!(heap_prefix_words % 8, 0);
+    // We will have at most this many objects in the heap, each requiring a bit
+    let n_bits = heap_size.to_words().as_usize();
+    // Each byte will hold 8 bits.
+    BITMAP_SIZE = (n_bits + 7) / 8;
+    // Also round allocation up to 8-bytes to make iteration efficient. We want to be able to read
+    // 64 bits in a single read and check as many bits as possible with a single `word != 0`.
+    let bitmap_bytes = Bytes(((BITMAP_SIZE + 7) / 8) * 8);
+    // Allocating an actual object here as otherwise dump_heap gets confused
+    // No post allocation barrier as this RTS-internal blob will be collected by the GC.
+    let blob = alloc_blob(mem, TAG_BLOB_B, bitmap_bytes).get_ptr() as *mut Blob;
+    memzero(blob.payload_addr() as usize, bitmap_bytes.to_words());
+
+    BITMAP_PTR = blob.payload_addr();
+    BITMAP_FORBIDDEN_PTR = BITMAP_PTR.sub(heap_prefix_words / 8)
+}
+
+pub unsafe fn free_bitmap() {
+    BITMAP_PTR = core::ptr::null_mut();
+    BITMAP_FORBIDDEN_PTR = core::ptr::null_mut();
+    BITMAP_SIZE = 0;
+}
+
+pub unsafe fn get_bit(idx: usize) -> bool {
+    let (byte_idx, bit_idx) = (idx / 8, idx % 8);
+    debug_assert!(byte_idx >= get_bitmap_forbidden_size());
+    debug_assert!(get_bitmap_forbidden_size() + BITMAP_SIZE > byte_idx);
+    let byte = *BITMAP_FORBIDDEN_PTR.add(byte_idx);
+    (byte >> bit_idx) & 0b1 != 0
+}
+
+pub unsafe fn set_bit(idx: usize) {
+    let (byte_idx, bit_idx) = (idx / 8, idx % 8);
+    debug_assert!(byte_idx >= get_bitmap_forbidden_size());
+    debug_assert!(get_bitmap_forbidden_size() + BITMAP_SIZE > byte_idx);
+    let byte = *BITMAP_FORBIDDEN_PTR.add(byte_idx);
+    let new_byte = byte | (0b1 << bit_idx);
+    *BITMAP_FORBIDDEN_PTR.add(byte_idx) = new_byte;
+}
+
+pub struct BitmapIter {
+    /// Size of the bitmap, in bits. Does not change after initialization.
+    size: u32,
+    /// Current bit index
+    current_bit_idx: u32,
+    /// Current 64-bit word in the bitmap that we're iterating. We read in 64-bit chunks to be able
+    /// to check as many bits as possible with a single `word != 0`.
+    current_word: u64,
+    /// How many leading bits are initially zeroed in the current_word
+    leading_zeros: u32,
+}
+
+pub unsafe fn iter_bits() -> BitmapIter {
+    let blob_len_bytes = (BITMAP_PTR.sub(size_of::<Blob>().to_bytes().as_usize()) as *mut Obj)
+        .as_blob()
+        .len()
+        .as_usize() as u32;
+
+    debug_assert_eq!(blob_len_bytes % 8, 0);
+
+    let blob_len_64bit_words = blob_len_bytes / 8;
+
+    let current_word = if blob_len_64bit_words == 0 {
+        0
+    } else {
+        let bitmap_ptr64 = BITMAP_PTR as *const u64;
+        bitmap_ptr64.read_unaligned()
+    };
+
+    debug_assert!(BITMAP_PTR as usize >= BITMAP_FORBIDDEN_PTR as usize);
+    let forbidden_bits = get_bitmap_forbidden_size() as u32 * 8;
+
+    BitmapIter {
+        size: blob_len_bytes * 8 + forbidden_bits,
+        current_bit_idx: forbidden_bits,
+        current_word,
+        leading_zeros: current_word.leading_zeros(),
+    }
+}
+
+/// This value marks the end-of-stream in `BitmapIter`. Using this value instead of `None` for
+/// end-of-stream reduces Wasm instructions executed by ~2.7% in some cases.
+//
+// Heap is 4GiB and each 32-bit word gets a bit, so this is one larger than the bit for the last
+// word in heap.
+//
+// (We actually need less bits than that as when the heap is full we can't allocate bitmap and mark
+// stack and can't do GC)
+pub const BITMAP_ITER_END: usize = usize::MAX;
+
+impl BitmapIter {
+    /// Returns the next bit, or `BITMAP_ITER_END` if there are no more bits set.
+    pub fn next(&mut self) -> usize {
+        debug_assert!(self.current_bit_idx <= self.size);
+
+        if self.current_bit_idx == self.size {
+            return BITMAP_ITER_END;
+        }
+
+        // Outer loop iterates 64-bit words
+        loop {
+            // Examine the least significant bit(s) in the current word
+            if self.current_word != 0 {
+                let shift_amt = self.current_word.trailing_zeros();
+                self.current_word >>= shift_amt;
+                self.current_word >>= 1;
+                let bit_idx = self.current_bit_idx + shift_amt;
+                self.current_bit_idx = bit_idx + 1;
+                return bit_idx as usize;
+            }
+
+            // Move on to next word (always 64-bit boundary)
+            self.current_bit_idx += self.leading_zeros;
+            unsafe {
+                debug_assert_eq!(
+                    (self.current_bit_idx - get_bitmap_forbidden_size() as u32 * 8) % 64,
+                    0
+                )
+            }
+            if self.current_bit_idx == self.size {
+                return BITMAP_ITER_END;
+            }
+            self.current_word = unsafe {
+                let ptr64 =
+                    BITMAP_FORBIDDEN_PTR.add(self.current_bit_idx as usize / 8) as *const u64;
+                ptr64.read_unaligned()
+            };
+            self.leading_zeros = self.current_word.leading_zeros();
+        }
+    }
+}
diff --git a/rts/motoko-rts/src/gc/mark_compact/mark_stack.rs b/rts/motoko-rts/src/gc/mark_compact/mark_stack.rs
new file mode 100644
index 00000000000..582214cc99e
--- /dev/null
+++ b/rts/motoko-rts/src/gc/mark_compact/mark_stack.rs
@@ -0,0 +1,76 @@
+//! A stack for marking heap objects (for GC). There should be no allocation after the stack
+//! otherwise things will break as we push. This invariant is checked in debug builds.
+
+use crate::gc::mark_compact::TAG_BLOB_B;
+use crate::memory::{alloc_blob, Memory};
+use crate::types::{Blob, Tag, Words};
+
+use core::ptr::null_mut;
+
+/// Initial stack size
+pub const INIT_STACK_SIZE: Words = Words(64);
+
+/// Pointer to the `blob` object for the mark stack. Used to get the capacity of the stack.
+static mut STACK_BLOB_PTR: *mut Blob = null_mut();
+
+/// Bottom of the mark stack
+pub static mut STACK_BASE: *mut usize = null_mut();
+
+/// Top of the mark stack
+pub static mut STACK_TOP: *mut usize = null_mut();
+
+/// Next free slot in the mark stack
+pub static mut STACK_PTR: *mut usize = null_mut();
+
+pub unsafe fn alloc_mark_stack(mem: &mut M) {
+    debug_assert!(STACK_BLOB_PTR.is_null());
+
+    // Allocating an actual object here to not break dump_heap
+    // No post allocation barrier as this RTS-internal blob will be collected by the GC.
+    STACK_BLOB_PTR = alloc_blob(mem, TAG_BLOB_B, INIT_STACK_SIZE.to_bytes()).get_ptr() as *mut Blob;
+    STACK_BASE = STACK_BLOB_PTR.payload_addr() as *mut usize;
+    STACK_PTR = STACK_BASE;
+    STACK_TOP = STACK_BASE.add(INIT_STACK_SIZE.as_usize());
+}
+
+pub unsafe fn free_mark_stack() {
+    STACK_BLOB_PTR = null_mut();
+    STACK_BASE = null_mut();
+    STACK_PTR = null_mut();
+    STACK_TOP = null_mut();
+}
+
+/// Doubles the stack size
+pub unsafe fn grow_stack(mem: &mut M) {
+    let stack_cap = STACK_BLOB_PTR.len().to_words();
+    let p = mem.alloc_words(stack_cap).get_ptr() as *mut usize;
+
+    // Make sure nothing was allocated after the stack
+    debug_assert_eq!(STACK_TOP, p);
+
+    let new_cap = stack_cap * 2;
+    (*STACK_BLOB_PTR).len = new_cap.to_bytes();
+    STACK_TOP = STACK_BASE.add(new_cap.as_usize());
+}
+
+pub unsafe fn push_mark_stack(mem: &mut M, obj: usize, obj_tag: Tag) {
+    // We add 2 words in a push, and `STACK_PTR` and `STACK_TOP` are both multiples of 2, so we can
+    // do simple equality check here
+    if STACK_PTR == STACK_TOP {
+        grow_stack(mem);
+    }
+
+    *STACK_PTR = obj;
+    *STACK_PTR.add(1) = obj_tag;
+    STACK_PTR = STACK_PTR.add(2);
+}
+
+pub unsafe fn pop_mark_stack() -> Option<(usize, Tag)> {
+    if STACK_PTR == STACK_BASE {
+        return None;
+    }
+    STACK_PTR = STACK_PTR.sub(2);
+    let p = *STACK_PTR;
+    let tag = *STACK_PTR.add(1);
+    return Some((p, tag));
+}
diff --git a/rts/motoko-rts/src/gc/incremental/sanity_checks/remembered_set.rs b/rts/motoko-rts/src/gc/remembered_set.rs
similarity index 88%
rename from rts/motoko-rts/src/gc/incremental/sanity_checks/remembered_set.rs
rename to rts/motoko-rts/src/gc/remembered_set.rs
index 9904244af18..a88c5fd7d6e 100644
--- a/rts/motoko-rts/src/gc/incremental/sanity_checks/remembered_set.rs
+++ b/rts/motoko-rts/src/gc/remembered_set.rs
@@ -42,7 +42,7 @@ use crate::types::{block_size, Blob, Bytes, Value, TAG_BLOB_B};
 
 pub struct RememberedSet {
     hash_table: *mut Blob,
-    count: u32, // contained entries
+    count: usize, // contained entries
 }
 
 #[repr(C)]
@@ -59,13 +59,13 @@ struct CollisionNode {
 
 pub struct RememberedSetIterator {
     hash_table: *mut Blob,
-    hash_index: u32,
+    hash_index: usize,
     current_entry: *mut HashEntry,
 }
 
-pub const INITIAL_TABLE_LENGTH: u32 = 1024;
-const GROWTH_FACTOR: u32 = 2;
-pub const OCCUPATION_THRESHOLD_PERCENT: u32 = 65;
+pub const INITIAL_TABLE_LENGTH: usize = 1024;
+const GROWTH_FACTOR: usize = 2;
+pub const OCCUPATION_THRESHOLD_PERCENT: usize = 65;
 
 impl RememberedSet {
     pub unsafe fn new(mem: &mut M) -> RememberedSet {
@@ -126,7 +126,7 @@ impl RememberedSet {
         false
     }
 
-    pub unsafe fn hash_index(&self, value: Value) -> u32 {
+    pub unsafe fn hash_index(&self, value: Value) -> usize {
         // Future optimization: Use bitwise modulo, check for power of 2
         let raw = value.get_raw();
         let length = table_length(self.hash_table);
@@ -138,7 +138,7 @@ impl RememberedSet {
         RememberedSetIterator::init(self)
     }
 
-    pub fn count(&self) -> u32 {
+    pub fn count(&self) -> usize {
         self.count
     }
 
@@ -222,10 +222,9 @@ impl RememberedSetIterator {
     }
 }
 
-unsafe fn new_table(mem: &mut M, size: u32) -> *mut Blob {
+unsafe fn new_table(mem: &mut M, size: usize) -> *mut Blob {
     // No post allocation barrier as this RTS-internal blob will be collected by the GC.
-    let table =
-        alloc_blob(mem, TAG_BLOB_B, Bytes(size * size_of::() as u32)).as_blob_mut();
+    let table = alloc_blob(mem, TAG_BLOB_B, Bytes(size * size_of::())).as_blob_mut();
     for index in 0..size {
         table_set(table, index, null_ptr_value());
     }
@@ -235,7 +234,7 @@ unsafe fn new_table(mem: &mut M, size: u32) -> *mut Blob {
 unsafe fn new_collision_node(mem: &mut M, value: Value) -> *mut CollisionNode {
     debug_assert!(!is_null_ptr_value(value));
     // No post allocation barrier as this RTS-internal blob will be collected by the GC.
-    let node = alloc_blob(mem, TAG_BLOB_B, Bytes(size_of::() as u32)).as_blob_mut()
+    let node = alloc_blob(mem, TAG_BLOB_B, Bytes(size_of::())).as_blob_mut()
         as *mut CollisionNode;
     (*node).entry = HashEntry {
         value,
@@ -244,31 +243,30 @@ unsafe fn new_collision_node(mem: &mut M, value: Value) -> *mut Colli
     node
 }
 
-unsafe fn table_get(table: *mut Blob, index: u32) -> *mut HashEntry {
+unsafe fn table_get(table: *mut Blob, index: usize) -> *mut HashEntry {
     debug_assert!(table != null_mut());
-    let entry =
-        (table.payload_addr() as u32 + index * size_of::() as u32) as *mut HashEntry;
+    let entry = (table.payload_addr() as usize + index * size_of::()) as *mut HashEntry;
     debug_assert!(
-        entry as u32 + size_of::() as u32
-            <= table as u32 + block_size(table as usize).to_bytes().as_u32()
+        entry as usize + size_of::()
+            <= table as usize + block_size(table as usize).to_bytes().as_usize()
     );
     entry
 }
 
-unsafe fn table_set(table: *mut Blob, index: u32, value: Value) {
+unsafe fn table_set(table: *mut Blob, index: usize, value: Value) {
     let entry = table_get(table, index);
     (*entry).value = value;
     (*entry).next_collision_ptr = null_mut();
 }
 
-unsafe fn table_length(table: *mut Blob) -> u32 {
+unsafe fn table_length(table: *mut Blob) -> usize {
     debug_assert!(table != null_mut());
-    debug_assert!(table.len().as_u32() % size_of::() as u32 == 0);
-    table.len().as_u32() / size_of::() as u32
+    debug_assert!(table.len().as_usize() % size_of::() == 0);
+    table.len().as_usize() / size_of::()
 }
 
 unsafe fn null_ptr_value() -> Value {
-    Value::from_raw((null_mut() as *mut usize) as u32)
+    Value::from_raw((null_mut() as *mut usize) as usize)
 }
 
 unsafe fn is_null_ptr_value(value: Value) -> bool {
diff --git a/rts/motoko-rts/src/idl.rs b/rts/motoko-rts/src/idl.rs
index 6435eb1c5e1..cfca1b9bfc2 100644
--- a/rts/motoko-rts/src/idl.rs
+++ b/rts/motoko-rts/src/idl.rs
@@ -1,16 +1,21 @@
 #![allow(non_upper_case_globals)]
+
 use crate::bitrel::BitRel;
 use crate::buf::{read_byte, read_word, skip_leb128, Buf};
 use crate::idl_trap_with;
-use crate::leb128::{leb128_decode, sleb128_decode};
+
 use crate::memory::{alloc_blob, Memory};
-use crate::persistence::compatibility::TypeDescriptor;
-use crate::types::{Value, Words, TAG_BLOB_B};
+use crate::types::{Words, TAG_BLOB_B};
 use crate::utf8::utf8_validate;
 
 use core::cmp::min;
 
-use motoko_rts_macros::ic_mem_fn;
+use motoko_rts_macros::{enhanced_orthogonal_persistence, ic_mem_fn};
+
+use crate::libc_declarations::{c_void, memcmp};
+
+#[enhanced_orthogonal_persistence]
+use crate::types::Value;
 
 extern "C" {
     // check instruction decoding limit, exported by moc
@@ -57,9 +62,23 @@ const IDL_CON_alias: i32 = 1;
 const IDL_PRIM_lowest: i32 = -17;
 
 // Only used for memory compatiblity checks for orthogonal persistence.
+#[enhanced_orthogonal_persistence]
 const IDL_EXT_blob: i32 = -129;
+#[enhanced_orthogonal_persistence]
 const IDL_EXT_tuple: i32 = -130;
 
+unsafe fn leb128_decode(buf: *mut Buf) -> u32 {
+    let value = crate::leb128::leb128_decode(buf);
+    assert!(value <= u32::MAX as usize);
+    value as u32
+}
+
+unsafe fn sleb128_decode(buf: *mut Buf) -> i32 {
+    let value = crate::leb128::sleb128_decode(buf);
+    assert!(value >= i32::MIN as isize && value <= i32::MAX as isize);
+    value as i32
+}
+
 pub unsafe fn leb128_decode_ptr(buf: *mut Buf) -> (u32, *mut u8) {
     (leb128_decode(buf), (*buf).ptr)
 }
@@ -70,7 +89,8 @@ enum CompatibilityMode {
     PureCandid,
     /// Candidish stabilization (old stabilization format).
     CandidishStabilization,
-    /// Memory compatibility of orthogonal persistence.
+    /// Memory compatibility of orthogonal persistence (with or without graph copying).
+    #[cfg(feature = "enhanced_orthogonal_persistence")]
     MemoryCompatibility,
 }
 
@@ -84,18 +104,15 @@ unsafe fn is_primitive_type(mode: CompatibilityMode, ty: i32) -> bool {
     match mode {
         CompatibilityMode::PureCandid => false,
         CompatibilityMode::CandidishStabilization => ty == IDL_EXT_region,
+        #[cfg(feature = "enhanced_orthogonal_persistence")]
         CompatibilityMode::MemoryCompatibility => ty == IDL_EXT_region || ty == IDL_EXT_blob,
     }
 }
 
 // TBR; based on Text.text_compare
-unsafe fn utf8_cmp(len1: u32, p1: *mut u8, len2: u32, p2: *mut u8) -> i32 {
+unsafe fn utf8_cmp(len1: usize, p1: *mut u8, len2: usize, p2: *mut u8) -> i32 {
     let len = min(len1, len2);
-    let cmp = libc::memcmp(
-        p1 as *mut libc::c_void,
-        p2 as *mut libc::c_void,
-        len as usize,
-    );
+    let cmp = memcmp(p1 as *mut c_void, p2 as *mut c_void, len);
     if cmp != 0 {
         return cmp;
     } else if len1 > len {
@@ -129,7 +146,7 @@ unsafe fn parse_fields(mode: CompatibilityMode, buf: *mut Buf, n_types: u32) {
 
 // NB. This function assumes the allocation does not need to survive GC
 // Therefore, no post allocation barrier is applied.
-unsafe fn alloc(mem: &mut M, size: Words) -> *mut u8 {
+unsafe fn alloc(mem: &mut M, size: Words) -> *mut u8 {
     alloc_blob(mem, TAG_BLOB_B, size.to_bytes())
         .as_blob_mut()
         .payload_addr()
@@ -158,7 +175,7 @@ unsafe fn parse_idl_header(
     extended: bool,
     buf: *mut Buf,
     typtbl_out: *mut *mut *mut u8,
-    typtbl_size_out: *mut u32,
+    typtbl_size_out: *mut usize,
     main_types_out: *mut *mut u8,
 ) {
     let mode = if extended {
@@ -187,10 +204,10 @@ unsafe fn parse_idl_header(
     }
 
     // Let the caller know about the table size
-    *typtbl_size_out = n_types;
+    *typtbl_size_out = n_types as usize;
 
     // Allocate the type table to be passed out
-    let typtbl: *mut *mut u8 = alloc(mem, Words(n_types)) as *mut _;
+    let typtbl: *mut *mut u8 = alloc(mem, Words(n_types as usize)) as *mut _;
 
     // Go through the table
     for i in 0..n_types {
@@ -247,14 +264,14 @@ unsafe fn parse_idl_header(
             for _ in 0..leb128_decode(buf) {
                 // Name
                 let (len, p) = leb128_decode_ptr(buf);
-                buf.advance(len);
+                buf.advance(len as usize);
                 // Method names must be valid unicode
-                utf8_validate(p as *const _, len);
+                utf8_validate(p as *const _, len as usize);
                 // Method names must be in order
                 if last_p != core::ptr::null_mut() {
-                    let cmp = libc::memcmp(
-                        last_p as *mut libc::c_void,
-                        p as *mut libc::c_void,
+                    let cmp = memcmp(
+                        last_p as *mut c_void,
+                        p as *mut c_void,
                         min(last_len, len) as usize,
                     );
                     if cmp > 0 || (cmp == 0 && last_len >= len) {
@@ -271,7 +288,7 @@ unsafe fn parse_idl_header(
         } else {
             // Future type
             let n = leb128_decode(buf);
-            buf.advance(n);
+            buf.advance(n as usize);
         }
     }
 
@@ -291,7 +308,7 @@ unsafe fn parse_idl_header(
             for _ in 0..leb128_decode(&mut tmp_buf) {
                 // Name
                 let len = leb128_decode(&mut tmp_buf);
-                Buf::advance(&mut tmp_buf, len);
+                Buf::advance(&mut tmp_buf, len as usize);
                 // Type
                 let t = sleb128_decode(&mut tmp_buf);
                 if !(t >= 0 && (t as u32) < n_types) {
@@ -330,13 +347,13 @@ unsafe fn read_byte_tag(buf: *mut Buf) -> u8 {
 
 unsafe fn skip_blob(buf: *mut Buf) {
     let len = leb128_decode(buf);
-    buf.advance(len);
+    buf.advance(len as usize);
 }
 
 unsafe fn skip_text(buf: *mut Buf) {
     let (len, p) = leb128_decode_ptr(buf);
-    buf.advance(len); // advance first; does the bounds check
-    utf8_validate(p as *const _, len);
+    buf.advance(len as usize); // advance first; does the bounds check
+    utf8_validate(p as *const _, len as usize);
 }
 
 unsafe fn skip_any_vec(buf: *mut Buf, typtbl: *mut *mut u8, t: i32, count: u32) {
@@ -492,7 +509,7 @@ unsafe extern "C" fn skip_any(buf: *mut Buf, typtbl: *mut *mut u8, t: i32, depth
                 // Future type
                 let n_data = leb128_decode(buf);
                 let n_ref = leb128_decode(buf);
-                buf.advance(n_data);
+                buf.advance(n_data as usize);
                 if n_ref > 0 {
                     idl_trap_with("skip_any: skipping references");
                 }
@@ -535,7 +552,7 @@ unsafe extern "C" fn find_field(
     typtbl: *mut *mut u8,
     tag: u32,
     n: *mut u8,
-) -> u32 {
+) -> bool {
     while *n > 0 {
         let last_p = (*tb).ptr;
         let this_tag = leb128_decode(tb);
@@ -545,15 +562,15 @@ unsafe extern "C" fn find_field(
             *n -= 1;
         } else if tag == this_tag {
             *n -= 1;
-            return 1;
+            return true;
         } else {
             // Rewind reading tag
             (*tb).ptr = last_p;
-            return 0;
+            return false;
         }
     }
 
-    0
+    false
 }
 
 #[no_mangle]
@@ -584,6 +601,7 @@ unsafe fn is_null_opt_reserved(typtbl: *mut *mut u8, end: *mut u8, t: i32) -> bo
     return t == IDL_CON_opt;
 }
 
+#[enhanced_orthogonal_persistence]
 #[derive(PartialEq, Clone, Copy, Debug)]
 pub(crate) enum TypeVariance {
     Covariance,
@@ -591,6 +609,7 @@ pub(crate) enum TypeVariance {
     Invariance,
 }
 
+#[enhanced_orthogonal_persistence]
 impl TypeVariance {
     fn invert(self) -> TypeVariance {
         match self {
@@ -601,7 +620,13 @@ impl TypeVariance {
     }
 }
 
-unsafe fn recurring_memory_check(cache: &BitRel, variance: TypeVariance, t1: u32, t2: u32) -> bool {
+#[enhanced_orthogonal_persistence]
+unsafe fn recurring_memory_check(
+    cache: &BitRel,
+    variance: TypeVariance,
+    t1: usize,
+    t2: usize,
+) -> bool {
     match variance {
         TypeVariance::Covariance => cache.visited(true, t1, t2),
         TypeVariance::Contravariance => cache.visited(false, t1, t2),
@@ -609,7 +634,8 @@ unsafe fn recurring_memory_check(cache: &BitRel, variance: TypeVariance, t1: u32
     }
 }
 
-unsafe fn remember_memory_check(cache: &BitRel, variance: TypeVariance, t1: u32, t2: u32) {
+#[enhanced_orthogonal_persistence]
+unsafe fn remember_memory_check(cache: &BitRel, variance: TypeVariance, t1: usize, t2: usize) {
     match variance {
         TypeVariance::Covariance => cache.visit(true, t1, t2),
         TypeVariance::Contravariance => cache.visit(false, t1, t2),
@@ -620,7 +646,7 @@ unsafe fn remember_memory_check(cache: &BitRel, variance: TypeVariance, t1: u32,
     }
 }
 
-/// Memory compatibility check for orthogonal persistence.
+/// Memory compatibility check for orthogonal persistence (with or without graph copying).
 /// Checks whether the new type (`typetbl2`) is compatible to the old type (`typetbl1`).
 /// The implementation is similar to the Candid sub-type test `sub()` below, however,
 /// with some relevant differences w.r.t. the permitted type relations:
@@ -630,6 +656,7 @@ unsafe fn remember_memory_check(cache: &BitRel, variance: TypeVariance, t1: u32,
 /// * Records cannot introduce additional optional fields.
 /// * Same arity for tuple types.
 /// * Records and tuples are distinct.
+#[enhanced_orthogonal_persistence]
 pub(crate) unsafe fn memory_compatible(
     rel: &BitRel,
     variance: TypeVariance,
@@ -645,8 +672,8 @@ pub(crate) unsafe fn memory_compatible(
     // i.e. new actor fields can be inserted in new program versions.
     // The `main_actor` flag only occurs non-recursively at the top level of the memory compatibility check.
     if !main_actor && t1 >= 0 && t2 >= 0 {
-        let t1 = t1 as u32;
-        let t2 = t2 as u32;
+        let t1 = t1 as usize;
+        let t2 = t2 as usize;
         if recurring_memory_check(rel, variance, t1, t2) {
             return true;
         };
@@ -894,7 +921,7 @@ pub(crate) unsafe fn memory_compatible(
                     return false;
                 };
                 let (len2, p2) = leb128_decode_ptr(&mut tb2);
-                Buf::advance(&mut tb2, len2);
+                Buf::advance(&mut tb2, len2 as usize);
                 let t21 = sleb128_decode(&mut tb2);
                 let mut len1: u32;
                 let mut p1: *mut u8;
@@ -902,10 +929,10 @@ pub(crate) unsafe fn memory_compatible(
                 let mut cmp: i32;
                 loop {
                     (len1, p1) = leb128_decode_ptr(&mut tb1);
-                    Buf::advance(&mut tb1, len1);
+                    Buf::advance(&mut tb1, len1 as usize);
                     t11 = sleb128_decode(&mut tb1);
                     n1 -= 1;
-                    cmp = utf8_cmp(len1, p1, len2, p2);
+                    cmp = utf8_cmp(len1 as usize, p1, len2 as usize, p2);
                     if variance != TypeVariance::Invariance && cmp < 0 && n1 > 0 {
                         continue;
                     };
@@ -938,8 +965,8 @@ pub(crate) unsafe fn sub(
     t2: i32,
 ) -> bool {
     if t1 >= 0 && t2 >= 0 {
-        let t1 = t1 as u32;
-        let t2 = t2 as u32;
+        let t1 = t1 as usize;
+        let t2 = t2 as usize;
         if rel.visited(p, t1, t2) {
             // visited? (bit 0)
             // return assumed or determined result
@@ -1161,7 +1188,7 @@ pub(crate) unsafe fn sub(
                         break 'return_false;
                     };
                     let (len2, p2) = leb128_decode_ptr(&mut tb2);
-                    Buf::advance(&mut tb2, len2);
+                    Buf::advance(&mut tb2, len2 as usize);
                     let t21 = sleb128_decode(&mut tb2);
                     let mut len1: u32;
                     let mut p1: *mut u8;
@@ -1169,10 +1196,10 @@ pub(crate) unsafe fn sub(
                     let mut cmp: i32;
                     loop {
                         (len1, p1) = leb128_decode_ptr(&mut tb1);
-                        Buf::advance(&mut tb1, len1);
+                        Buf::advance(&mut tb1, len1 as usize);
                         t11 = sleb128_decode(&mut tb1);
                         n1 -= 1;
-                        cmp = utf8_cmp(len1, p1, len2, p2);
+                        cmp = utf8_cmp(len1 as usize, p1, len2 as usize, p2);
                         if cmp < 0 && n1 > 0 {
                             continue;
                         };
@@ -1195,19 +1222,23 @@ pub(crate) unsafe fn sub(
     }
     // remember negative result ...
     if t1 >= 0 && t2 >= 0 {
-        rel.disprove(p, t1 as u32, t2 as u32);
+        rel.disprove(p, t1 as usize, t2 as usize);
     }
     // .. only then return false
     return false;
 }
 
 #[no_mangle]
-unsafe extern "C" fn idl_sub_buf_words(typtbl_size1: u32, typtbl_size2: u32) -> u32 {
+unsafe extern "C" fn idl_sub_buf_words(typtbl_size1: usize, typtbl_size2: usize) -> usize {
     return BitRel::words(typtbl_size1, typtbl_size2);
 }
 
 #[no_mangle]
-unsafe extern "C" fn idl_sub_buf_init(rel_buf: *mut u32, typtbl_size1: u32, typtbl_size2: u32) {
+unsafe extern "C" fn idl_sub_buf_init(
+    rel_buf: *mut usize,
+    typtbl_size1: usize,
+    typtbl_size2: usize,
+) {
     let rel = BitRel {
         ptr: rel_buf,
         end: rel_buf.add(idl_sub_buf_words(typtbl_size1, typtbl_size2) as usize),
@@ -1217,6 +1248,7 @@ unsafe extern "C" fn idl_sub_buf_init(rel_buf: *mut u32, typtbl_size1: u32, typt
     rel.init();
 }
 
+#[enhanced_orthogonal_persistence]
 #[ic_mem_fn]
 unsafe fn idl_alloc_typtbl(
     mem: &mut M,
@@ -1224,27 +1256,29 @@ unsafe fn idl_alloc_typtbl(
     type_offsets: Value,
     typtbl_out: *mut *mut *mut u8,
     typtbl_end_out: *mut *mut u8,
-    typtbl_size_out: *mut u32,
+    typtbl_size_out: *mut usize,
 ) {
+    use crate::persistence::compatibility::TypeDescriptor;
+
     let mut type_descriptor = TypeDescriptor::new(candid_data, type_offsets);
     *typtbl_out = type_descriptor.build_type_table(mem);
     *typtbl_end_out = type_descriptor.type_table_end();
-    *typtbl_size_out = type_descriptor.type_count() as u32;
+    *typtbl_size_out = type_descriptor.type_count();
 }
 
 #[no_mangle]
 unsafe extern "C" fn idl_sub(
-    rel_buf: *mut u32, // a buffer with at least 2 * typtbl_size1 * typtbl_size2 bits
+    rel_buf: *mut usize, // a buffer with at least 2 * typtbl_size1 * typtbl_size2 bits
     typtbl1: *mut *mut u8,
     typtbl2: *mut *mut u8,
     typtbl_end1: *mut u8,
     typtbl_end2: *mut u8,
-    typtbl_size1: u32,
-    typtbl_size2: u32,
+    typtbl_size1: usize,
+    typtbl_size2: usize,
     t1: i32,
     t2: i32,
 ) -> bool {
-    debug_assert!(rel_buf != (0 as *mut u32));
+    debug_assert!(rel_buf != (0 as *mut usize));
     debug_assert!(typtbl1 != (0 as *mut *mut u8));
     debug_assert!(typtbl2 != (0 as *mut *mut u8));
     debug_assert!(typtbl_end1 != (0 as *mut u8));
@@ -1256,9 +1290,7 @@ unsafe extern "C" fn idl_sub(
         size1: typtbl_size1,
         size2: typtbl_size2,
     };
-
     debug_assert!(t1 < (typtbl_size1 as i32) && t2 < (typtbl_size2 as i32));
-
     return sub(
         &rel,
         true,
diff --git a/rts/motoko-rts/src/leb128.rs b/rts/motoko-rts/src/leb128.rs
index 7df17efa77d..76a22eb1a2f 100644
--- a/rts/motoko-rts/src/leb128.rs
+++ b/rts/motoko-rts/src/leb128.rs
@@ -3,7 +3,7 @@
 use crate::buf::{read_byte, Buf};
 
 #[no_mangle]
-pub unsafe extern "C" fn leb128_encode(mut val: u32, mut buf: *mut u8) {
+pub unsafe extern "C" fn leb128_encode(mut val: usize, mut buf: *mut u8) {
     loop {
         let byte = (val & 0b0111_1111) as u8;
         val >>= 7;
@@ -18,7 +18,7 @@ pub unsafe extern "C" fn leb128_encode(mut val: u32, mut buf: *mut u8) {
 }
 
 #[no_mangle]
-pub unsafe extern "C" fn sleb128_encode(mut val: i32, mut buf: *mut u8) {
+pub unsafe extern "C" fn sleb128_encode(mut val: isize, mut buf: *mut u8) {
     loop {
         let byte = (val & 0b0111_1111) as u8;
         val >>= 7;
@@ -34,23 +34,37 @@ pub unsafe extern "C" fn sleb128_encode(mut val: i32, mut buf: *mut u8) {
 }
 
 #[no_mangle]
-pub unsafe extern "C" fn leb128_decode(buf: *mut Buf) -> u32 {
+pub unsafe extern "C" fn leb128_decode(buf: *mut Buf) -> usize {
     leb128_decode_checked(buf).expect("leb128_decode: overflow")
 }
 
 /// Returns `None` on overflow
-pub unsafe fn leb128_decode_checked(buf: *mut Buf) -> Option {
+pub unsafe fn leb128_decode_checked(buf: *mut Buf) -> Option {
     let mut result = 0;
     let mut shift = 0;
 
     loop {
         let byte = read_byte(buf);
 
-        result |= ((byte & 0b0111_1111) as u32) << shift;
-
-        // The 5th byte needs to be the last, and it must contribute at most 4 bits, otherwise we
+        result |= ((byte & 0b0111_1111) as usize) << shift;
+
+        let overflow = match usize::BITS {
+            u64::BITS => {
+                // The 10th byte needs to be the last, and it must contribute at most 1 bit, otherwise we
+                // have an overflow.
+                shift == 63 && (byte & 0b1111_1110) != 0
+            }
+            u32::BITS => {
+                // The 5th byte needs to be the last, and it must contribute at most 4 bits, otherwise we
+                // have an overflow.
+                shift == 28 && (byte & 0b1111_0000) != 0
+            }
+            _ => unreachable!(),
+        };
+
+        // If the final byte contributed disallowed bits for the target width (see above), we
         // have an overflow
-        if shift == 28 && (byte & 0b1111_0000) != 0 {
+        if overflow {
             return None;
         }
 
@@ -65,23 +79,33 @@ pub unsafe fn leb128_decode_checked(buf: *mut Buf) -> Option {
 }
 
 #[no_mangle]
-pub unsafe extern "C" fn sleb128_decode(buf: *mut Buf) -> i32 {
+pub unsafe extern "C" fn sleb128_decode(buf: *mut Buf) -> isize {
     sleb128_decode_checked(buf).expect("sleb128_decode: overflow")
 }
 
 /// Returns `None` on overflow
-pub unsafe fn sleb128_decode_checked(buf: *mut Buf) -> Option {
+pub unsafe fn sleb128_decode_checked(buf: *mut Buf) -> Option {
     let mut result = 0;
     let mut shift = 0;
 
     let last_byte = loop {
         let byte = read_byte(buf);
 
-        result |= ((byte & 0b0111_1111) as i32) << shift;
+        result |= ((byte & 0b0111_1111) as isize) << shift;
 
         // Overflow check ported from Wasm reference implementation:
         // https://github.com/WebAssembly/spec/blob/f9770eb75117cac0c878feaa5eaf4a4d9dda61f5/interpreter/binary/decode.ml#L89-L98
-        if shift == 28 && (byte & 0b0111_1000 != 0 && byte & 0b0111_1000 != 0b0111_1000) {
+        let overflow = match usize::BITS {
+            u64::BITS => {
+                shift == 63 && (byte & 0b0111_1111 != 0 && byte & 0b0111_1111 != 0b0111_1111)
+            }
+            u32::BITS => {
+                shift == 28 && (byte & 0b0111_1000 != 0 && byte & 0b0111_1000 != 0b0111_1000)
+            }
+            _ => unreachable!(),
+        };
+
+        if overflow {
             return None;
         }
 
@@ -93,7 +117,7 @@ pub unsafe fn sleb128_decode_checked(buf: *mut Buf) -> Option {
     };
 
     // Sign extend
-    if shift < 32 && last_byte & 0b0100_0000 != 0 {
+    if shift < usize::BITS && last_byte & 0b0100_0000 != 0 {
         result |= !0 << shift;
     }
 
diff --git a/rts/motoko-rts/src/lib.rs b/rts/motoko-rts/src/lib.rs
index 52130eba35f..e3579e31a8d 100644
--- a/rts/motoko-rts/src/lib.rs
+++ b/rts/motoko-rts/src/lib.rs
@@ -5,7 +5,10 @@
     arbitrary_self_types,
     core_intrinsics,
     panic_info_message,
-    proc_macro_hygiene
+    proc_macro_hygiene,
+    // We do not need SIMD, but this flag enables `core::arch::wasm64`.
+    // See https://github.com/rust-lang/rust/issues/90599
+    simd_wasm64
 )]
 
 // c.f. https://os.phil-opp.com/heap-allocation/#dynamic-memory
@@ -36,16 +39,21 @@ pub mod gc;
 #[cfg(feature = "ic")]
 mod idl;
 pub mod leb128;
-mod mem_utils;
+mod libc_declarations;
+pub mod mem_utils;
 pub mod memory;
 #[cfg(feature = "ic")]
+#[enhanced_orthogonal_persistence]
 pub mod persistence;
 pub mod principal_id;
 #[cfg(feature = "ic")]
 pub mod region;
-#[cfg(feature = "ic")]
-mod stable_mem;
+#[enhanced_orthogonal_persistence]
+pub mod stabilization;
+pub mod stable_mem;
 mod static_checks;
+#[classical_persistence]
+pub mod stream;
 pub mod text;
 pub mod text_iter;
 mod tommath_bindings;
@@ -53,8 +61,6 @@ pub mod types;
 pub mod utf8;
 mod visitor;
 
-use types::Bytes;
-
 use motoko_rts_macros::*;
 
 #[ic_mem_fn(ic_only)]
@@ -62,13 +68,20 @@ unsafe fn version(mem: &mut M) -> types::Value {
     text::text_of_str(mem, "0.1")
 }
 
+#[non_incremental_gc]
+#[ic_mem_fn(ic_only)]
+unsafe fn alloc_words(mem: &mut M, n: types::Words) -> types::Value {
+    mem.alloc_words(n)
+}
+
+#[incremental_gc]
 #[ic_mem_fn(ic_only)]
-unsafe fn alloc_words(mem: &mut M, n: types::Words) -> types::Value {
+unsafe fn alloc_words(mem: &mut M, n: types::Words) -> types::Value {
     crate::gc::incremental::get_partitioned_heap().allocate(mem, n)
 }
 
 extern "C" {
-    fn rts_trap(msg: *const u8, len: Bytes) -> !;
+    fn rts_trap(msg: *const u8, len: u32) -> !;
 }
 
 pub(crate) unsafe fn trap_with_prefix(prefix: &str, msg: &str) -> ! {
@@ -99,7 +112,8 @@ pub(crate) unsafe fn trap_with_prefix(prefix: &str, msg: &str) -> ! {
         b_idx += 1;
     }
 
-    rts_trap(c_str.as_ptr(), Bytes(b_idx as u32));
+    assert!(b_idx <= u32::MAX as usize);
+    rts_trap(c_str.as_ptr(), b_idx as u32);
 }
 
 pub(crate) unsafe fn idl_trap_with(msg: &str) -> ! {
diff --git a/rts/motoko-rts/src/libc_declarations.rs b/rts/motoko-rts/src/libc_declarations.rs
new file mode 100644
index 00000000000..25a8b3c181c
--- /dev/null
+++ b/rts/motoko-rts/src/libc_declarations.rs
@@ -0,0 +1,36 @@
+// Declarations adopted from https://github.com/rust-lang/libc/blob/main/src/wasi.rs.
+#![allow(non_camel_case_types)]
+
+use motoko_rts_macros::classical_persistence;
+use motoko_rts_macros::enhanced_orthogonal_persistence;
+
+pub(crate) type c_void = core::ffi::c_void;
+pub(crate) type size_t = usize;
+pub(crate) type c_char = i8;
+pub(crate) type c_int = i32;
+
+#[classical_persistence]
+#[cfg(feature = "ic")]
+pub(crate) type c_double = f64;
+
+#[classical_persistence]
+pub(crate) unsafe fn memcpy(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void {
+    libc::memcpy(dest, src, n)
+}
+
+#[classical_persistence]
+pub(crate) unsafe fn memset(dest: *mut c_void, c: c_int, n: size_t) -> *mut c_void {
+    libc::memset(dest, c, n)
+}
+
+#[classical_persistence]
+pub(crate) unsafe fn memcmp(cx: *const c_void, ct: *const c_void, n: size_t) -> c_int {
+    libc::memcmp(cx, ct, n)
+}
+
+#[enhanced_orthogonal_persistence]
+extern "C" {
+    pub(crate) fn memcpy(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void;
+    pub(crate) fn memset(dest: *mut c_void, c: c_int, n: size_t) -> *mut c_void;
+    pub(crate) fn memcmp(cx: *const c_void, ct: *const c_void, n: size_t) -> c_int;
+}
diff --git a/rts/motoko-rts/src/mem_utils.rs b/rts/motoko-rts/src/mem_utils.rs
index 37b1d07e1ce..67bbc4c0207 100644
--- a/rts/motoko-rts/src/mem_utils.rs
+++ b/rts/motoko-rts/src/mem_utils.rs
@@ -1,18 +1,20 @@
 use crate::types::{Bytes, Words};
 
-pub(crate) unsafe fn memcpy_words(to: usize, from: usize, n: Words) {
-    libc::memcpy(to as *mut _, from as *const _, n.to_bytes().as_usize());
+use crate::libc_declarations::{memcpy, memset};
+
+pub unsafe fn memcpy_words(to: usize, from: usize, n: Words) {
+    memcpy(to as *mut _, from as *const _, n.to_bytes().as_usize());
 }
 
-pub(crate) unsafe fn memcpy_bytes(to: usize, from: usize, n: Bytes) {
-    libc::memcpy(to as *mut _, from as *const _, n.as_usize());
+pub unsafe fn memcpy_bytes(to: usize, from: usize, n: Bytes) {
+    memcpy(to as *mut _, from as *const _, n.as_usize());
 }
 
-pub(crate) unsafe fn memzero(to: usize, n: Words) {
-    libc::memset(to as *mut _, 0, n.to_bytes().as_usize());
+pub unsafe fn memzero(to: usize, n: Words) {
+    memset(to as *mut _, 0, n.to_bytes().as_usize());
 }
 
 #[cfg(feature = "ic")]
-pub(crate) unsafe fn memzero_bytes(to: usize, n: Bytes) {
-    libc::memset(to as *mut _, 0, n.as_usize());
+pub unsafe fn memzero_bytes(to: usize, n: Bytes) {
+    memset(to as *mut _, 0, n.as_usize());
 }
diff --git a/rts/motoko-rts/src/memory.rs b/rts/motoko-rts/src/memory.rs
index 15a272263e5..ad1f0e75bef 100644
--- a/rts/motoko-rts/src/memory.rs
+++ b/rts/motoko-rts/src/memory.rs
@@ -1,14 +1,24 @@
 #[cfg(feature = "ic")]
 pub mod ic;
+use crate::{constants::MAX_ARRAY_LENGTH_FOR_ITERATOR, types::*};
 
-use crate::constants::MAX_ARRAY_SIZE;
-use crate::rts_trap_with;
-use crate::types::*;
+use motoko_rts_macros::classical_persistence;
+use motoko_rts_macros::enhanced_orthogonal_persistence;
+use motoko_rts_macros::ic_mem_fn;
 
 #[cfg(feature = "ic")]
 use crate::constants::MB;
 
-use motoko_rts_macros::ic_mem_fn;
+#[enhanced_orthogonal_persistence]
+use crate::constants::GB;
+
+// TODO: Redesign for 64-bit support by using a dynamic partition list.
+/// Currently limited to 64 GB.
+#[enhanced_orthogonal_persistence]
+pub const MAXIMUM_MEMORY_SIZE: Bytes<u64> = Bytes(64u64 * GB as u64);
+
+#[classical_persistence]
+pub const MAXIMUM_MEMORY_SIZE: Bytes<u64> = Bytes(usize::MAX as u64);
 
 // Memory reserve in bytes ensured during update and initialization calls.
 // For use by queries and upgrade calls.
@@ -35,17 +45,17 @@ pub(crate) const GENERAL_MEMORY_RESERVE: usize = 256 * MB;
 /// This function does not take any `Memory` arguments can be used by the generated code.
 pub trait Memory {
     // General allocator working for all GC variants.
-    unsafe fn alloc_words(&mut self, n: Words<u32>) -> Value;
+    unsafe fn alloc_words(&mut self, n: Words<usize>) -> Value;
 
     // Grow the allocated memory size to at least the address of `ptr`.
-    unsafe fn grow_memory(&mut self, ptr: u64);
+    unsafe fn grow_memory(&mut self, ptr: usize);
 }
 
 /// Allocate a new blob.
 /// Note: After initialization, the post allocation barrier needs to be applied to all mutator objects.
 /// For RTS-internal blobs that can be collected by the next GC run, the post allocation barrier can be omitted.
 #[ic_mem_fn]
-pub unsafe fn alloc_blob<M: Memory>(mem: &mut M, tag: u32, size: Bytes<u32>) -> Value {
+pub unsafe fn alloc_blob<M: Memory>(mem: &mut M, tag: Tag, size: Bytes<usize>) -> Value {
     debug_assert!(is_blob_tag(tag));
     let ptr = mem.alloc_words(size_of::<Blob>() + size.to_words());
     // NB. Cannot use `as_blob` here as we didn't write the header yet
@@ -60,12 +70,9 @@ pub unsafe fn alloc_blob(mem: &mut M, tag: u32, size: Bytes) ->
 /// Allocate a new array.
 /// Note: After initialization, the post allocation barrier needs to be applied to all mutator objects.
 #[ic_mem_fn]
-pub unsafe fn alloc_array<M: Memory>(mem: &mut M, tag: u32, len: u32) -> Value {
+pub unsafe fn alloc_array<M: Memory>(mem: &mut M, tag: Tag, len: usize) -> Value {
     debug_assert!(is_base_array_tag(tag));
-    // Array payload should not be larger than half of the memory
-    if len > MAX_ARRAY_SIZE {
-        rts_trap_with("Array allocation too large");
-    }
+    assert!(len <= MAX_ARRAY_LENGTH_FOR_ITERATOR);
 
     let skewed_ptr = mem.alloc_words(size_of::<Array>() + Words(len));
 
diff --git a/rts/motoko-rts/src/memory/ic.rs b/rts/motoko-rts/src/memory/ic.rs
index 8e8334dd76d..36cae23a4eb 100644
--- a/rts/motoko-rts/src/memory/ic.rs
+++ b/rts/motoko-rts/src/memory/ic.rs
@@ -1,77 +1,84 @@
 // This module is only enabled when compiling the RTS for IC or WASI.
 
+#[non_incremental_gc]
+pub mod linear_memory;
+
+#[incremental_gc]
+pub mod partitioned_memory;
+
+#[classical_persistence]
+pub mod classical_memory;
+
+#[enhanced_orthogonal_persistence]
+pub mod enhanced_memory;
+
+use motoko_rts_macros::{
+    classical_persistence, enhanced_orthogonal_persistence, incremental_gc, non_incremental_gc,
+};
+
 use super::Memory;
-use crate::constants::WASM_PAGE_SIZE;
-use crate::rts_trap_with;
-use crate::types::{Bytes, Value, Words};
-use core::arch::wasm32;
 
+// Provided by generated code
 extern "C" {
+    #[classical_persistence]
+    pub(crate) fn get_static_roots() -> crate::types::Value;
+
     fn keep_memory_reserve() -> bool;
 }
 
+#[classical_persistence]
+pub(crate) unsafe fn get_aligned_heap_base() -> usize {
+    classical_memory::get_aligned_heap_base()
+}
+
+#[enhanced_orthogonal_persistence]
+pub(crate) unsafe fn get_aligned_heap_base() -> usize {
+    enhanced_memory::get_aligned_heap_base()
+}
+
 /// Provides a `Memory` implementation, to be used in functions compiled for IC or WASI. The
 /// `Memory` implementation allocates in Wasm heap with Wasm `memory.grow` instruction.
 pub struct IcMemory;
 
-impl Memory for IcMemory {
-    #[inline]
-    unsafe fn alloc_words(&mut self, n: Words) -> Value {
-        crate::gc::incremental::get_partitioned_heap().allocate(self, n)
-    }
-
-    /// Page allocation. Ensures that the memory up to, but excluding, the given pointer is allocated.
-    /// Ensure a memory reserve of at least one Wasm page depending on the canister state.
-    /// `memory_reserve`: A memory reserve in bytes ensured during update and initialization calls.
-    /// For use by queries and upgrade calls. The reserve may vary depending on the phase of the incremental GC.
-    #[inline(never)]
-    unsafe fn grow_memory(&mut self, ptr: u64) {
-        const LAST_PAGE_LIMIT: usize = 0xFFFF_0000;
-        debug_assert_eq!(LAST_PAGE_LIMIT, usize::MAX - WASM_PAGE_SIZE.as_usize() + 1);
-        // Spare a memory reserve during update and initialization calls for use by queries and upgrades.
-        let memory_reserve = if keep_memory_reserve() {
-            crate::gc::incremental::memory_reserve()
-        } else {
-            0
-        };
-        // In any case, the last Wasm memory page is reserved to guard against shadow call stack overflows.
-        // This call stack is used both by the Rust runtime system implementation and by the compiler backend,
-        // see module `Stack` in `compile.ml`. This requires function activation frames to be less than the
-        // Wasm page size.
-        debug_assert!(memory_reserve <= LAST_PAGE_LIMIT);
-        let limit = LAST_PAGE_LIMIT - memory_reserve;
-        // The pointer is one byte larger than the memory size to be allocated, see the comment above.
-        if ptr > limit as u64 {
-            rts_trap_with("Cannot grow memory")
-        };
-        let page_size = u64::from(WASM_PAGE_SIZE.as_u32());
-        let total_pages_needed = ((ptr + page_size - 1) / page_size) as usize;
-        let current_pages = wasm32::memory_size(0);
-        if total_pages_needed > current_pages {
-            if wasm32::memory_grow(0, total_pages_needed - current_pages) == core::usize::MAX {
-                // replica signals that there is not enough memory
-                rts_trap_with("Cannot grow memory");
-            }
-        }
-    }
+/// Number of Wasm pages in main memory.
+#[classical_persistence]
+#[non_incremental_gc]
+fn wasm_memory_size() -> usize {
+    classical_memory::wasm_memory_size()
 }
 
-#[no_mangle]
-unsafe extern "C" fn get_reclaimed() -> Bytes<u64> {
-    crate::gc::incremental::get_partitioned_heap().reclaimed_size()
+/// Page allocation. Ensures that the memory up to, but excluding, the given pointer is allocated.
+/// Ensure a memory reserve of at least one Wasm page depending on the canister state.
+/// `memory_reserve`: A memory reserve in bytes ensured during update and initialization calls.
+/// The reserve can be used by queries and upgrade calls.
+#[classical_persistence]
+unsafe fn grow_memory(ptr: u64, memory_reserve: usize) {
+    classical_memory::grow_memory(ptr, memory_reserve);
 }
 
-#[no_mangle]
-pub unsafe extern "C" fn get_total_allocations() -> Bytes<u64> {
-    Bytes(u64::from(get_heap_size().as_u32())) + get_reclaimed()
+/// Page allocation. Ensures that the memory up to, but excluding, the given pointer is allocated.
+/// Ensure a memory reserve of at least one Wasm page depending on the canister state.
+/// `memory_reserve`: A memory reserve in bytes ensured during update and initialization calls.
+/// The reserve can be used by queries and upgrade calls.
+#[enhanced_orthogonal_persistence]
+unsafe fn grow_memory(ptr: u64, memory_reserve: usize) {
+    use core::mem::size_of;
+    // Statically assert the safe conversion from `u64` to `usize`.
+    const _: () = assert!(size_of::<u64>() == size_of::<usize>());
+    enhanced_memory::grow_memory(ptr as usize, memory_reserve);
 }
 
-#[no_mangle]
-pub unsafe extern "C" fn get_heap_size() -> Bytes<u32> {
-    crate::gc::incremental::get_partitioned_heap().occupied_size()
+/// Grow memory without memory reserve (except the last WASM page).
+/// Used during RTS initialization.
+#[classical_persistence]
+#[incremental_gc]
+pub(crate) unsafe fn allocate_wasm_memory(memory_size: crate::types::Bytes<usize>) {
+    classical_memory::allocate_wasm_memory(memory_size);
 }
 
-#[no_mangle]
-pub unsafe extern "C" fn get_max_live_size() -> Bytes<u32> {
-    crate::gc::incremental::get_max_live_size()
+/// Grow memory without memory reserve (except the last WASM page).
+/// Used during RTS initialization.
+#[enhanced_orthogonal_persistence]
+pub(crate) unsafe fn allocate_wasm_memory(memory_size: crate::types::Bytes<usize>) {
+    enhanced_memory::allocate_wasm_memory(memory_size);
 }
diff --git a/rts/motoko-rts/src/memory/ic/classical_memory.rs b/rts/motoko-rts/src/memory/ic/classical_memory.rs
new file mode 100644
index 00000000000..a26eeea8a87
--- /dev/null
+++ b/rts/motoko-rts/src/memory/ic/classical_memory.rs
@@ -0,0 +1,62 @@
+use motoko_rts_macros::incremental_gc;
+
+use crate::constants::WASM_PAGE_SIZE;
+use crate::memory::ic::keep_memory_reserve;
+use crate::rts_trap_with;
+use core::arch::wasm32;
+
+// Provided by generated code
+extern "C" {
+    fn get_heap_base() -> usize;
+}
+
+pub(crate) unsafe fn get_aligned_heap_base() -> usize {
+    // align to 32 bytes
+    ((get_heap_base() + 31) / 32) * 32
+}
+
+/// Number of Wasm pages in main memory.
+pub fn wasm_memory_size() -> usize {
+    wasm32::memory_size(0)
+}
+
+/// Page allocation. Ensures that the memory up to, but excluding, the given pointer is allocated.
+/// Ensure a memory reserve of at least one Wasm page depending on the canister state.
+/// `memory_reserve`: A memory reserve in bytes ensured during update and initialization calls.
+/// The reserve can be used by queries and upgrade calls. The reserve may vary depending on the GC
+/// and the phase of the GC.
+pub unsafe fn grow_memory(ptr: u64, memory_reserve: usize) {
+    const LAST_PAGE_LIMIT: usize = usize::MAX - WASM_PAGE_SIZE.as_usize() + 1;
+    // Spare a memory reserve during update and initialization calls for use by queries and upgrades.
+    let memory_reserve = if keep_memory_reserve() {
+        memory_reserve
+    } else {
+        0
+    };
+    // In any case, the last Wasm memory page is reserved to guard against shadow call stack overflows.
+    // This call stack is used both by the Rust runtime system implementation and by the compiler backend,
+    // see module `Stack` in `compile.ml`. This requires function activation frames to be less than the
+    // Wasm page size.
+    debug_assert!(memory_reserve <= LAST_PAGE_LIMIT);
+    let limit = LAST_PAGE_LIMIT - memory_reserve;
+    // The pointer is one byte larger than the memory size to be allocated, see the comment above.
+    if ptr > limit as u64 {
+        rts_trap_with("Cannot grow memory")
+    };
+    let page_size = WASM_PAGE_SIZE.as_usize() as u64;
+    let total_pages_needed = ((ptr + page_size - 1) / page_size) as usize;
+    let current_pages = wasm_memory_size();
+    if total_pages_needed > current_pages {
+        if wasm32::memory_grow(0, total_pages_needed - current_pages) == core::usize::MAX {
+            // replica signals that there is not enough memory
+            rts_trap_with("Cannot grow memory");
+        }
+    }
+}
+
+/// Grow memory without memory reserve (except the last WASM page).
+/// Used during RTS initialization.
+#[incremental_gc]
+pub(crate) unsafe fn allocate_wasm_memory(memory_size: crate::types::Bytes<usize>) {
+    grow_memory(memory_size.as_usize() as u64, WASM_PAGE_SIZE.as_usize());
+}
diff --git a/rts/motoko-rts/src/memory/ic/enhanced_memory.rs b/rts/motoko-rts/src/memory/ic/enhanced_memory.rs
new file mode 100644
index 00000000000..365e667ac74
--- /dev/null
+++ b/rts/motoko-rts/src/memory/ic/enhanced_memory.rs
@@ -0,0 +1,84 @@
+use crate::constants::{GB, WASM_PAGE_SIZE};
+use crate::memory::ic::keep_memory_reserve;
+use crate::rts_trap_with;
+use crate::types::Bytes;
+use core::arch::wasm64;
+use core::cmp::max;
+
+pub(crate) unsafe fn get_aligned_heap_base() -> usize {
+    crate::persistence::HEAP_START
+}
+
+/// Probe the memory capacity beyond this limit by pre-allocating the reserve.
+/// This is necessary because there is no function to query the IC's main memory capacity for 64-bit.
+/// It is assumed that the capacity is at least the 32-bit space of 4GB, minus the last reserved
+/// Wasm page (for Rust call stack overflow detection, see `compile.ml`).
+const GUARANTEED_MEMORY_CAPACITY: usize = 4 * GB - WASM_PAGE_SIZE.0;
+
+/// Assumption (not correctness-critical): The IC offers main memory in multiple of 2 GB.
+/// This helps to avoid overly frequent memory probing when the heap grows.
+/// The capacity granularity only serves as a heuristics for GC scheduling.
+const IC_MEMORY_CAPACITY_GRANULARITY: usize = 2 * GB;
+
+/// Page allocation. Ensures that the memory up to, but excluding, the given pointer is allocated.
+/// Ensure a memory reserve of at least one Wasm page depending on the canister state.
+/// `memory_reserve`: A memory reserve in bytes ensured during update and initialization calls.
+/// The reserve can be used by queries and upgrade calls.
+pub(crate) unsafe fn grow_memory(ptr: usize, memory_reserve: usize) {
+    debug_assert!(memory_reserve <= GUARANTEED_MEMORY_CAPACITY);
+    let memory_demand =
+        if keep_memory_reserve() && ptr > GUARANTEED_MEMORY_CAPACITY - memory_reserve {
+            // Detect overflow of `ptr + memory_reserve`.
+            if ptr > usize::MAX - memory_reserve {
+                rts_trap_with("Cannot grow memory");
+            }
+            // The reserve will be pre-allocated as a way to check the main memory capacity.
+            // As the reserve can be substantial, this is only done when memory demand has
+            // grown beyond `GUARANTEED_MEMORY_CAPACITY`.
+            ptr + memory_reserve
+        } else {
+            // Either no reserve is needed or there is enough guaranteed memory capacity for the reserve,
+            // we can skip the pre-allocation of a reserve.
+            ptr
+        };
+    allocate_wasm_memory(Bytes(memory_demand));
+}
+
+/// Supposed minimum memory capacity used for GC scheduling heuristics.
+/// The result may increase after time. This is because the actual capacity is
+/// not known upfront and can only derived by memory allocation probing.
+/// Moreover, the IC may increase the canister main memory capacity in newer versions.
+pub(crate) fn minimum_memory_capacity() -> Bytes<usize> {
+    let allocated_memory = wasm64::memory_size(0) * WASM_PAGE_SIZE.as_usize();
+    let unrounded_capacity = max(allocated_memory, GUARANTEED_MEMORY_CAPACITY);
+    Bytes(unrounded_capacity).next_multiple_of(IC_MEMORY_CAPACITY_GRANULARITY)
+}
+
+/// Grow memory without memory reserve. Used during RTS initialization and by the ordinary
+/// reserve-conscious memory-grow operation (`Memory::grow_memory`).
+pub(crate) unsafe fn allocate_wasm_memory(memory_size: Bytes<usize>) {
+    const LAST_PAGE_LIMIT: usize = 0xFFFF_FFFF_FFFF_0000;
+    debug_assert_eq!(LAST_PAGE_LIMIT, usize::MAX - WASM_PAGE_SIZE.as_usize() + 1);
+    // Never allocate the last page (shadow call stack overflow detection, see `compile.ml`).
+    if memory_size.as_usize() > LAST_PAGE_LIMIT {
+        rts_trap_with("Cannot grow memory");
+    }
+    if !probe_wasm_memory(memory_size) {
+        // replica signals that there is not enough memory
+        rts_trap_with("Cannot grow memory");
+    }
+}
+
+/// Try to allocate an amount of Wasm memory by growing the Wasm memory space if needed.
+/// Returns true if the memory has been allocated and is available.
+/// Otherwise, it returns false if there does not exist enough Wasm memory.
+pub(crate) unsafe fn probe_wasm_memory(memory_size: Bytes<usize>) -> bool {
+    let page_size = WASM_PAGE_SIZE.as_usize();
+    let total_pages_needed = (memory_size.as_usize() + page_size - 1) / page_size;
+    let current_pages = wasm64::memory_size(0);
+    if total_pages_needed > current_pages {
+        wasm64::memory_grow(0, total_pages_needed - current_pages) != core::usize::MAX
+    } else {
+        true
+    }
+}
diff --git a/rts/motoko-rts/src/memory/ic/linear_memory.rs b/rts/motoko-rts/src/memory/ic/linear_memory.rs
new file mode 100644
index 00000000000..6009546f4f6
--- /dev/null
+++ b/rts/motoko-rts/src/memory/ic/linear_memory.rs
@@ -0,0 +1,80 @@
+use super::{get_aligned_heap_base, IcMemory, Memory};
+use crate::{memory::GENERAL_MEMORY_RESERVE, types::*};
+
+/// Amount of garbage collected so far.
+pub(crate) static mut RECLAIMED: Bytes<u64> = Bytes(0);
+
+/// Maximum live data retained in a GC.
+pub(crate) static mut MAX_LIVE: Bytes<usize> = Bytes(0);
+
+// Heap pointer (skewed)
+extern "C" {
+    fn setHP(new_hp: usize);
+    fn getHP() -> usize;
+}
+
+pub(crate) unsafe fn set_hp_unskewed(new_hp: usize) {
+    setHP(skew(new_hp))
+}
+pub(crate) unsafe fn get_hp_unskewed() -> usize {
+    unskew(getHP())
+}
+
+/// Heap pointer after last GC
+pub(crate) static mut LAST_HP: usize = 0;
+
+pub(crate) unsafe fn initialize() {
+    LAST_HP = get_aligned_heap_base();
+    set_hp_unskewed(LAST_HP);
+}
+
+#[no_mangle]
+unsafe extern "C" fn get_reclaimed() -> Bytes<u64> {
+    RECLAIMED
+}
+
+#[no_mangle]
+pub unsafe extern "C" fn get_total_allocations() -> Bytes<u64> {
+    Bytes(get_heap_size().as_usize() as u64) + get_reclaimed()
+}
+
+#[no_mangle]
+pub unsafe extern "C" fn get_heap_size() -> Bytes<usize> {
+    Bytes(get_hp_unskewed() - get_aligned_heap_base())
+}
+
+#[no_mangle]
+unsafe extern "C" fn get_max_live_size() -> Bytes<usize> {
+    MAX_LIVE
+}
+
+impl Memory for IcMemory {
+    #[inline]
+    unsafe fn alloc_words(&mut self, n: Words) -> Value {
+        let bytes = n.to_bytes();
+        let delta = bytes.as_usize() as u64;
+
+        // Update heap pointer
+        let old_hp = getHP() as u64;
+        let new_hp = old_hp + delta;
+
+        // Grow memory if needed
+        if new_hp > ((super::wasm_memory_size() as u64) << 16) {
+            linear_grow_memory(new_hp);
+        }
+
+        debug_assert!(new_hp <= core::usize::MAX as u64);
+        setHP(new_hp as usize);
+
+        Value::from_raw(old_hp as usize)
+    }
+
+    #[inline(never)]
+    unsafe fn grow_memory(&mut self, ptr: usize) {
+        linear_grow_memory(ptr as u64);
+    }
+}
+
+unsafe fn linear_grow_memory(ptr: u64) {
+    super::grow_memory(ptr as u64, GENERAL_MEMORY_RESERVE);
+}
diff --git a/rts/motoko-rts/src/memory/ic/partitioned_memory.rs b/rts/motoko-rts/src/memory/ic/partitioned_memory.rs
new file mode 100644
index 00000000000..7da5edaac66
--- /dev/null
+++ b/rts/motoko-rts/src/memory/ic/partitioned_memory.rs
@@ -0,0 +1,35 @@
+use super::{IcMemory, Memory};
+use crate::types::*;
+
+#[no_mangle]
+unsafe extern "C" fn get_reclaimed() -> Bytes<u64> {
+    crate::gc::incremental::get_partitioned_heap().reclaimed_size()
+}
+
+#[no_mangle]
+pub unsafe extern "C" fn get_total_allocations() -> Bytes<u64> {
+    crate::gc::incremental::get_partitioned_heap().total_allocated_size()
+}
+
+#[no_mangle]
+pub unsafe extern "C" fn get_max_live_size() -> Bytes<usize> {
+    crate::gc::incremental::get_max_live_size()
+}
+
+#[no_mangle]
+pub unsafe extern "C" fn get_heap_size() -> Bytes<usize> {
+    crate::gc::incremental::get_partitioned_heap().occupied_size()
+}
+
+impl Memory for IcMemory {
+    #[inline]
+    unsafe fn alloc_words(&mut self, n: Words) -> Value {
+        crate::gc::incremental::get_partitioned_heap().allocate(self, n)
+    }
+
+    #[inline(never)]
+    unsafe fn grow_memory(&mut self, ptr: usize) {
+        let memory_reserve = crate::gc::incremental::memory_reserve();
+        super::grow_memory(ptr as u64, memory_reserve);
+    }
+}
diff --git a/rts/motoko-rts/src/persistence.rs b/rts/motoko-rts/src/persistence.rs
index f51cf6da1fe..3f6f0e9735e 100644
--- a/rts/motoko-rts/src/persistence.rs
+++ b/rts/motoko-rts/src/persistence.rs
@@ -9,23 +9,24 @@ use motoko_rts_macros::ic_mem_fn;
 use crate::{
     barriers::write_with_barrier,
     constants::{KB, MB},
-    gc::incremental::State,
+    gc::incremental::{partitioned_heap::allocate_initial_memory, State},
     memory::Memory,
     persistence::compatibility::memory_compatible,
     region::{
         LEGACY_VERSION_NO_STABLE_MEMORY, LEGACY_VERSION_REGIONS, LEGACY_VERSION_SOME_STABLE_MEMORY,
-        VERSION_STABLE_HEAP_NO_REGIONS, VERSION_STABLE_HEAP_REGIONS,
+        VERSION_GRAPH_COPY_NO_REGIONS, VERSION_GRAPH_COPY_REGIONS, VERSION_STABLE_HEAP_NO_REGIONS,
+        VERSION_STABLE_HEAP_REGIONS,
     },
     rts_trap_with,
     stable_mem::read_persistence_version,
-    types::{Value, TAG_BLOB_B},
+    types::{Bytes, Value, TAG_BLOB_B},
 };
 
 use self::compatibility::TypeDescriptor;
 
 const FINGERPRINT: [char; 32] = [
     'M', 'O', 'T', 'O', 'K', 'O', ' ', 'O', 'R', 'T', 'H', 'O', 'G', 'O', 'N', 'A', 'L', ' ', 'P',
-    'E', 'R', 'S', 'I', 'S', 'T', 'E', 'N', 'C', 'E', ' ', '3', '2',
+    'E', 'R', 'S', 'I', 'S', 'T', 'E', 'N', 'C', 'E', ' ', '6', '4',
 ];
 const VERSION: usize = 1;
 /// The `Value` representation in the default-initialized Wasm memory.
@@ -90,34 +91,36 @@ impl PersistentMetadata {
         }
     }
 
-    unsafe fn initialize<M: Memory>(self: *mut Self, mem: &mut M) {
+    unsafe fn initialize<M: Memory>(self: *mut Self) {
         use crate::gc::incremental::IncrementalGC;
-        debug_assert!(!self.is_initialized());
         (*self).fingerprint = FINGERPRINT;
         (*self).version = VERSION;
         (*self).stable_actor = DEFAULT_VALUE;
         (*self).stable_type = TypeDescriptor::default();
-        (*self).incremental_gc_state = IncrementalGC::initial_gc_state(mem, HEAP_START);
+        (*self).incremental_gc_state = IncrementalGC::<M>::initial_gc_state(HEAP_START);
         (*self).upgrade_instructions = 0;
     }
 }
 
-/// Initialize fresh persistent memory after the canister installation or
-/// reuse the persistent memory on a canister upgrade.
-pub unsafe fn initialize_memory<M: Memory>(mem: &mut M) {
-    mem.grow_memory(HEAP_START as u64);
+/// Initialize fresh persistent memory after the canister installation or reuse
+/// the persistent memory on a canister upgrade if enhanced orthogonal persistence
+/// is active. For graph-copy-based destabilization, the memory is reinitialized.
+pub unsafe fn initialize_memory<M: Memory>() {
+    allocate_initial_memory(Bytes(HEAP_START));
     let metadata = PersistentMetadata::get();
     if use_enhanced_orthogonal_persistence() && metadata.is_initialized() {
         metadata.check_version();
     } else {
-        metadata.initialize(mem);
+        metadata.initialize::<M>();
     }
 }
 
 unsafe fn use_enhanced_orthogonal_persistence() -> bool {
     match read_persistence_version() {
         VERSION_STABLE_HEAP_NO_REGIONS | VERSION_STABLE_HEAP_REGIONS => true,
-        LEGACY_VERSION_NO_STABLE_MEMORY
+        VERSION_GRAPH_COPY_NO_REGIONS
+        | VERSION_GRAPH_COPY_REGIONS
+        | LEGACY_VERSION_NO_STABLE_MEMORY
         | LEGACY_VERSION_SOME_STABLE_MEMORY
         | LEGACY_VERSION_REGIONS => false,
         _ => rts_trap_with("Unsupported persistence version"),
@@ -162,16 +165,16 @@ pub(crate) unsafe fn stable_actor_location() -> *mut Value {
 /// Determine whether an object contains a specific field.
 /// Used for upgrading to an actor with additional stable fields.
 #[no_mangle]
-pub unsafe extern "C" fn contains_field(actor: Value, field_hash: u32) -> bool {
+pub unsafe extern "C" fn contains_field(actor: Value, field_hash: usize) -> bool {
     use crate::constants::WORD_SIZE;
 
     let object = actor.as_object();
     let hash_blob = (*object).hash_blob.as_blob();
-    assert_eq!(hash_blob.len().as_u32() % WORD_SIZE, 0);
-    let number_of_fields = hash_blob.len().as_u32() / WORD_SIZE;
-    let mut current_address = hash_blob.payload_const() as u32;
+    assert_eq!(hash_blob.len().as_usize() % WORD_SIZE, 0);
+    let number_of_fields = hash_blob.len().as_usize() / WORD_SIZE;
+    let mut current_address = hash_blob.payload_const() as usize;
     for _ in 0..number_of_fields {
-        let hash_address = current_address as *mut u32;
+        let hash_address = current_address as *mut usize;
         let hash_value = *hash_address;
         // The hash sequence is sorted: Stop when the hash matches or cannot exist.
         if hash_value >= field_hash {
@@ -182,7 +185,7 @@ pub unsafe extern "C" fn contains_field(actor: Value, field_hash: u32) -> bool {
     false
 }
 
-/// Register the stable actor type on canister installation and upgrade.
+/// Register the stable actor type on canister initialization and upgrade.
 /// The type is stored in the persistent metadata memory for later retrieval on canister upgrades.
 /// On an upgrade, the memory compatibility between the new and existing stable type is checked.
 /// The `new_type` value points to a blob encoding the new stable actor type.
@@ -224,3 +227,14 @@ pub unsafe extern "C" fn set_upgrade_instructions(instructions: u64) {
     let metadata = PersistentMetadata::get();
     (*metadata).upgrade_instructions = instructions;
 }
+
+/// Only used in WASI mode: Get a static temporary print buffer that resides in 32-bit address range.
+/// This buffer has a fix length of 512 bytes, and resides at the end of the metadata reserve.
+#[no_mangle]
+pub unsafe extern "C" fn buffer_in_32_bit_range() -> usize {
+    use crate::types::size_of;
+
+    const BUFFER_SIZE: usize = 512;
+    assert!(size_of::<PersistentMetadata>().to_bytes().as_usize() + BUFFER_SIZE < METADATA_RESERVE);
+    METADATA_ADDRESS + METADATA_RESERVE - BUFFER_SIZE
+}
diff --git a/rts/motoko-rts/src/persistence/compatibility.rs b/rts/motoko-rts/src/persistence/compatibility.rs
index ce61aadc1fb..e144a9ff04a 100644
--- a/rts/motoko-rts/src/persistence/compatibility.rs
+++ b/rts/motoko-rts/src/persistence/compatibility.rs
@@ -26,7 +26,7 @@ const DEFAULT_VALUE: Value = Value::from_scalar(0);
 pub struct TypeDescriptor {
     // Blob with candid-encoded type definitions.
     candid_data: Value,
-    // Blob with a list of `u32` offsets referring to the `candid_data`.
+    // Blob with a list of `usize` offsets referring to the `candid_data`.
     type_offsets: Value,
 }
 
@@ -38,10 +38,12 @@ impl TypeDescriptor {
         }
     }
 
-    pub unsafe fn new(candid_data: Value, type_offsets: Value) -> Self {
-        Self {
-            candid_data: candid_data.forward_if_possible(),
-            type_offsets: type_offsets.forward_if_possible(),
+    pub fn new(candid_data: Value, type_offsets: Value) -> Self {
+        unsafe {
+            Self {
+                candid_data: candid_data.forward_if_possible(),
+                type_offsets: type_offsets.forward_if_possible(),
+            }
         }
     }
 
@@ -53,6 +55,14 @@ impl TypeDescriptor {
         assert!(self.candid_data != DEFAULT_VALUE && self.type_offsets != DEFAULT_VALUE);
     }
 
+    pub fn candid_data(&self) -> Value {
+        self.candid_data
+    }
+
+    pub fn type_offsets(&self) -> Value {
+        self.type_offsets
+    }
+
     // GC root if part of the persistent stable type
     pub fn candid_data_location(&mut self) -> *mut Value {
         &mut self.candid_data as *mut Value
@@ -72,7 +82,7 @@ impl TypeDescriptor {
 
     pub unsafe fn type_count(&self) -> usize {
         let blob_size = self.type_offsets.as_blob().len();
-        assert_eq!(blob_size.as_u32() % WORD_SIZE, 0);
+        assert_eq!(blob_size.as_usize() % WORD_SIZE, 0);
         blob_size.to_words().as_usize()
     }
 
@@ -80,15 +90,15 @@ impl TypeDescriptor {
     // be used during a single IC message when no GC increment is running in between.
     pub unsafe fn build_type_table<M: Memory>(&mut self, mem: &mut M) -> *mut *mut u8 {
         let type_count = self.type_count();
-        let temporary_blob = alloc_blob(mem, TAG_BLOB_B, Words(type_count as u32).to_bytes());
-        let offset_table = self.type_offsets.as_blob().payload_const() as *const u32;
+        let temporary_blob = alloc_blob(mem, TAG_BLOB_B, Words(type_count).to_bytes());
+        let offset_table = self.type_offsets.as_blob().payload_const() as *const usize;
         let type_table = temporary_blob.as_blob_mut().payload_addr() as *mut *mut u8;
         let candid_data = self.candid_data.as_blob_mut().payload_addr();
         for index in 0..type_count {
             let offset = *offset_table.add(index);
-            debug_assert!((offset as usize) < self.candid_length());
+            debug_assert!(offset < self.candid_length());
             let entry = type_table.add(index);
-            *entry = candid_data.add(offset as usize);
+            *entry = candid_data.add(offset);
         }
         type_table
     }
@@ -110,16 +120,16 @@ unsafe fn create_type_check_cache(
     old_type: &TypeDescriptor,
     new_type: &TypeDescriptor,
 ) -> BitRel {
-    let old_type_count = old_type.type_count() as u32;
-    let new_type_count = new_type.type_count() as u32;
+    let old_type_count = old_type.type_count();
+    let new_type_count = new_type.type_count();
     let words = Words(BitRel::words(old_type_count, new_type_count));
     let byte_length = words.to_bytes();
     let blob_value = alloc_blob(mem, TAG_BLOB_B, byte_length);
-    let ptr = blob_value.as_blob_mut().payload_addr() as *mut u32;
+    let ptr = blob_value.as_blob_mut().payload_addr() as *mut usize;
     let end = blob_value
         .as_blob()
         .payload_const()
-        .add(byte_length.as_usize()) as *mut u32;
+        .add(byte_length.as_usize()) as *mut usize;
     let cache = BitRel {
         ptr,
         end,
diff --git a/rts/motoko-rts/src/principal_id.rs b/rts/motoko-rts/src/principal_id.rs
index c24be6009eb..1be577a9ca2 100644
--- a/rts/motoko-rts/src/principal_id.rs
+++ b/rts/motoko-rts/src/principal_id.rs
@@ -22,7 +22,7 @@ pub unsafe extern "C" fn compute_crc32(blob: Value) -> u32 {
 
     let mut crc: u32 = !0;
 
-    for i in 0..len.as_u32() {
+    for i in 0..len.as_usize() {
         let octet = blob.get(i);
         crc = (crc >> 8) ^ CRC_TABLE[usize::from((crc & 0xFF) as u8 ^ octet)];
     }
@@ -98,7 +98,7 @@ pub unsafe fn base32_of_checksummed_blob(mem: &mut M, b: Value) -> Va
     let n = b.as_blob().len();
     let mut data = b.as_blob().payload_const();
 
-    let r = alloc_blob(mem, TAG_BLOB_T, Bytes((n.as_u32() + 4 + 4) / 5 * 8)); // contains padding
+    let r = alloc_blob(mem, TAG_BLOB_T, Bytes((n.as_usize() + 4 + 4) / 5 * 8)); // contains padding
     let blob = r.as_blob_mut();
     let dest = blob.payload_addr();
 
@@ -114,7 +114,7 @@ pub unsafe fn base32_of_checksummed_blob(mem: &mut M, b: Value) -> Va
     enc_stash(&mut pump, (checksum >> 8) as u8);
     enc_stash(&mut pump, checksum as u8);
 
-    for _ in 0..n.as_u32() {
+    for _ in 0..n.as_usize() {
         enc_stash(&mut pump, *data);
         data = data.add(1);
     }
@@ -125,7 +125,7 @@ pub unsafe fn base32_of_checksummed_blob(mem: &mut M, b: Value) -> Va
         stash_enc_base32(pump.pending_data as u8, pump.dest);
         pump.dest = pump.dest.add(1);
         // Discount padding
-        let new_len = Bytes(pump.dest.offset_from(dest) as u32);
+        let new_len = Bytes(pump.dest.offset_from(dest) as usize);
         blob.shrink(new_len);
     }
 
@@ -185,7 +185,7 @@ pub unsafe fn base32_to_blob(mem: &mut M, b: Value) -> Value {
     let mut data = b.as_blob().payload_const();
 
     // Every group of 8 characters will yield 5 bytes
-    let r = alloc_blob(mem, TAG_BLOB_B, Bytes(((n.as_u32() + 7) / 8) * 5)); // we deal with padding later
+    let r = alloc_blob(mem, TAG_BLOB_B, Bytes(((n.as_usize() + 7) / 8) * 5)); // we deal with padding later
     let blob = r.as_blob_mut();
     let dest = blob.payload_addr();
 
@@ -197,13 +197,13 @@ pub unsafe fn base32_to_blob(mem: &mut M, b: Value) -> Value {
         pending_data: 0,
     };
 
-    for _ in 0..n.as_u32() {
+    for _ in 0..n.as_usize() {
         dec_stash(&mut pump, *data);
         data = data.add(1);
     }
 
     // Adjust resulting blob len
-    let new_len = Bytes(pump.dest.offset_from(dest) as u32);
+    let new_len = Bytes(pump.dest.offset_from(dest) as usize);
     blob.shrink(new_len);
 
     allocation_barrier(r)
@@ -225,12 +225,12 @@ unsafe fn base32_to_principal(mem: &mut M, b: Value) -> Value {
     let mut data = blob.payload_const();
 
     // Every group of 5 characters will yield 6 bytes (due to the hypen)
-    let r = alloc_blob(mem, TAG_BLOB_T, Bytes(((n.as_u32() + 4) / 5) * 6));
+    let r = alloc_blob(mem, TAG_BLOB_T, Bytes(((n.as_usize() + 4) / 5) * 6));
     let blob = r.as_blob_mut();
     let mut dest = blob.payload_addr();
 
     let mut n_written = 0;
-    for i in 0..n.as_u32() {
+    for i in 0..n.as_usize() {
         let mut byte = *data;
         data = data.add(1);
 
@@ -244,7 +244,7 @@ unsafe fn base32_to_principal(mem: &mut M, b: Value) -> Value {
         n_written += 1;
 
         // If quintet done, add hyphen
-        if n_written % 5 == 0 && i + 1 < n.as_u32() {
+        if n_written % 5 == 0 && i + 1 < n.as_usize() {
             n_written = 0;
             *dest = b'-';
             dest = dest.add(1);
@@ -252,7 +252,7 @@ unsafe fn base32_to_principal(mem: &mut M, b: Value) -> Value {
     }
 
     // Adjust result length
-    let new_len = Bytes(dest as u32 - blob.payload_addr() as u32);
+    let new_len = Bytes(dest as usize - blob.payload_addr() as usize);
     blob.shrink(new_len);
     allocation_barrier(r)
 }
@@ -286,7 +286,7 @@ pub unsafe fn blob_of_principal(mem: &mut M, t: Value) -> Value {
 }
 
 // for testing
-pub unsafe fn blob_of_ptr_size(mem: &mut M, buf: *const u8, n: Bytes) -> Value {
+pub unsafe fn blob_of_ptr_size(mem: &mut M, buf: *const u8, n: Bytes) -> Value {
     let blob = alloc_blob(mem, TAG_BLOB_B, n);
     let payload_addr = blob.as_blob_mut().payload_addr();
     memcpy_bytes(payload_addr as usize, buf as usize, n);
@@ -294,5 +294,5 @@ pub unsafe fn blob_of_ptr_size(mem: &mut M, buf: *const u8, n: Bytes<
 }
 
 pub unsafe fn blob_of_str(mem: &mut M, s: &str) -> Value {
-    blob_of_ptr_size(mem, s.as_ptr(), Bytes(s.len() as u32))
+    blob_of_ptr_size(mem, s.as_ptr(), Bytes(s.len()))
 }
diff --git a/rts/motoko-rts/src/print.rs b/rts/motoko-rts/src/print.rs
index 11ee84be33e..6832ab70d3b 100644
--- a/rts/motoko-rts/src/print.rs
+++ b/rts/motoko-rts/src/print.rs
@@ -8,7 +8,7 @@ use core::fmt;
 extern "C" {
     // `print_ptr` is implemented by the code generator and has different implementations depending
     // on the target platform (WASI or IC).
-    fn print_ptr(ptr: usize, len: u32);
+    fn print_ptr(ptr: usize, len: usize);
 }
 
 /*
@@ -61,13 +61,13 @@ macro_rules! format {
 
 /// A buffer that implements `core::fmt::Write`. `Write` methods will write to the buffer until
 /// it's filled and then ignore the rest, without failing.
-pub(crate) struct WriteBuf<'a> {
+pub struct WriteBuf<'a> {
     buf: &'a mut [u8],
     offset: usize,
 }
 
 impl<'a> WriteBuf<'a> {
-    pub(crate) fn new(buf: &'a mut [u8]) -> Self {
+    pub fn new(buf: &'a mut [u8]) -> Self {
         Self { buf, offset: 0 }
     }
 
@@ -76,7 +76,7 @@ impl<'a> WriteBuf<'a> {
     }
 
     pub(crate) unsafe fn print(&self) {
-        print_ptr(self.buf.as_ptr() as usize, self.offset as u32)
+        print_ptr(self.buf.as_ptr() as usize, self.offset)
     }
 }
 
@@ -96,10 +96,10 @@ impl<'a> fmt::Write for WriteBuf<'a> {
     }
 }
 
-pub(crate) unsafe fn print(buf: &WriteBuf) {
+pub unsafe fn print(buf: &WriteBuf) {
     buf.print()
 }
 
-// pub(crate) unsafe fn print_str(str: &str) {
-//     print_ptr(str.as_ptr() as usize, str.len() as u32)
-// }
+pub(crate) unsafe fn print_str(str: &str) {
+    print_ptr(str.as_ptr() as usize, str.len())
+}
diff --git a/rts/motoko-rts/src/region.rs b/rts/motoko-rts/src/region.rs
index 0df31a02629..6ec109d3aa4 100644
--- a/rts/motoko-rts/src/region.rs
+++ b/rts/motoko-rts/src/region.rs
@@ -5,18 +5,27 @@ use crate::types::{size_of, Blob, Bytes, Region, Value, TAG_BLOB_B, TAG_REGION};
 
 // Versions
 // Should agree with constants in StableMem in compile.ml
-// Version 0 to 2 are legacy.
-pub const LEGACY_VERSION_NO_STABLE_MEMORY: u32 = 0; // Never manifests in serialized form
-pub const LEGACY_VERSION_SOME_STABLE_MEMORY: u32 = 1;
-pub const LEGACY_VERSION_REGIONS: u32 = 2;
-pub const VERSION_STABLE_HEAP_NO_REGIONS: u32 = 3;
-pub const VERSION_STABLE_HEAP_REGIONS: u32 = 4;
 
+// Legacy versions, used with classical persistence
+pub const LEGACY_VERSION_NO_STABLE_MEMORY: usize = 0; // Never manifests in serialized form
+pub const LEGACY_VERSION_SOME_STABLE_MEMORY: usize = 1;
+pub const LEGACY_VERSION_REGIONS: usize = 2;
+
+// New versions, used with enhanced orthogonal persistence
+pub(crate) const VERSION_GRAPH_COPY_NO_REGIONS: usize = 3;
+pub(crate) const VERSION_GRAPH_COPY_REGIONS: usize = 4;
+pub(crate) const VERSION_STABLE_HEAP_NO_REGIONS: usize = 5;
+pub(crate) const VERSION_STABLE_HEAP_REGIONS: usize = 6;
+
+const _: () = assert!(meta_data::size::PAGE_IN_BYTES == crate::stable_mem::PAGE_SIZE);
 const _: () = assert!(meta_data::size::PAGES_IN_BLOCK <= u8::MAX as u32);
 const _: () = assert!(meta_data::max::BLOCKS <= u16::MAX);
 const _: () = assert!(meta_data::max::REGIONS <= u64::MAX - 1);
 
-use motoko_rts_macros::ic_mem_fn;
+use motoko_rts_macros::{
+    classical_persistence, enhanced_orthogonal_persistence, ic_mem_fn,
+    uses_enhanced_orthogonal_persistence,
+};
 
 unsafe fn region_trap_with(msg: &str) -> ! {
     trap_with_prefix("Region error: ", msg)
@@ -128,13 +137,13 @@ impl AccessVector {
     }
 
     pub unsafe fn set_ith_block_id(&self, i: u32, block_id: &BlockId) {
-        debug_assert!(i * 2 + 1 < self.0.len().as_u32());
-        self.0.set_u16(i, block_id.0)
+        debug_assert!((i as usize) * 2 + 1 < self.0.len().as_usize());
+        self.0.set_u16(i as usize, block_id.0)
     }
 
     pub unsafe fn get_ith_block_id(&self, i: u32) -> BlockId {
-        debug_assert!(i * 2 + 1 < self.0.len().as_u32());
-        BlockId(self.0.get_u16(i))
+        debug_assert!((i as usize) * 2 + 1 < self.0.len().as_usize());
+        BlockId(self.0.get_u16(i as usize))
     }
 }
 
@@ -379,7 +388,7 @@ unsafe fn write_magic() {
 unsafe fn alloc_region(
     mem: &mut M,
     id: u64,
-    page_count: u32,
+    page_count: usize,
     vec_pages: Value,
 ) -> Value {
     let r_ptr = mem.alloc_words(size_of::());
@@ -391,8 +400,8 @@ unsafe fn alloc_region(
     region.write_id64(id);
     debug_assert!(
         page_count
-            <= (vec_pages.as_blob().len().as_u32() / meta_data::bytes_of::() as u32)
-                * meta_data::size::PAGES_IN_BLOCK
+            <= (vec_pages.as_blob().len().as_usize() / meta_data::bytes_of::() as usize)
+                * meta_data::size::PAGES_IN_BLOCK as usize
     );
     (*region).page_count = page_count;
     init_with_barrier(mem, &mut (*region).vec_pages, vec_pages);
@@ -406,7 +415,7 @@ unsafe fn init_region(
     mem: &mut M,
     r: Value,
     id: u64,
-    page_count: u32,
+    page_count: usize,
     vec_pages: Value,
 ) {
     let r = r.as_region();
@@ -414,8 +423,8 @@ unsafe fn init_region(
     r.write_id64(id);
     debug_assert!(
         page_count
-            <= (vec_pages.as_blob().len().as_u32() / meta_data::bytes_of::() as u32)
-                * meta_data::size::PAGES_IN_BLOCK
+            <= (vec_pages.as_blob().len().as_usize() / meta_data::bytes_of::() as usize)
+                * meta_data::size::PAGES_IN_BLOCK as usize
     );
     (*r).page_count = page_count;
     write_with_barrier(mem, &mut (*r).vec_pages, vec_pages);
@@ -428,7 +437,7 @@ pub unsafe fn region_id(_mem: &mut M, r: Value) -> u64 {
 }
 
 #[ic_mem_fn]
-pub unsafe fn region_page_count(_mem: &mut M, r: Value) -> u32 {
+pub unsafe fn region_page_count(_mem: &mut M, r: Value) -> usize {
     let r = r.as_untagged_region();
     (*r).page_count
 }
@@ -472,21 +481,43 @@ pub(crate) unsafe fn region0_get_ptr_loc() -> *mut Value {
     &mut REGION_0
 }
 
-#[ic_mem_fn]
-pub unsafe fn region_new(mem: &mut M) -> Value {
+#[classical_persistence]
+unsafe fn migrate_on_new_region(mem: &mut M) {
+    match crate::stable_mem::get_version() {
+        LEGACY_VERSION_NO_STABLE_MEMORY => {
+            assert_eq!(crate::stable_mem::size(), 0);
+            region_migration_from_no_stable_memory(mem);
+        }
+        LEGACY_VERSION_SOME_STABLE_MEMORY => {
+            region_migration_from_some_stable_memory(mem);
+        }
+        LEGACY_VERSION_REGIONS => {}
+        _ => {
+            assert!(false);
+        }
+    }
+}
+
+#[enhanced_orthogonal_persistence]
+unsafe fn migrate_on_new_region(mem: &mut M) {
     match crate::stable_mem::get_version() {
-        VERSION_STABLE_HEAP_REGIONS => {}
-        VERSION_STABLE_HEAP_NO_REGIONS => {
+        VERSION_STABLE_HEAP_NO_REGIONS | VERSION_GRAPH_COPY_NO_REGIONS => {
             if crate::stable_mem::size() == 0 {
                 region_migration_from_no_stable_memory(mem);
             } else {
                 region_migration_from_some_stable_memory(mem);
             }
         }
+        VERSION_STABLE_HEAP_REGIONS | VERSION_GRAPH_COPY_REGIONS => {}
         _ => {
             assert!(false);
         }
-    };
+    }
+}
+
+#[ic_mem_fn]
+pub unsafe fn region_new(mem: &mut M) -> Value {
+    migrate_on_new_region(mem);
 
     let next_id = meta_data::total_allocated_regions::get();
 
@@ -533,7 +564,7 @@ pub unsafe fn region_recover(mem: &mut M, rid: &RegionId) -> Value {
     let vec_pages = alloc_blob(
         mem,
         TAG_BLOB_B,
-        Bytes(block_count * bytes_of::() as u32),
+        Bytes(block_count as usize * bytes_of::() as usize),
     );
 
     let av = AccessVector(vec_pages.as_blob_mut());
@@ -554,15 +585,36 @@ pub unsafe fn region_recover(mem: &mut M, rid: &RegionId) -> Value {
     assert_eq!(recovered_blocks, block_count);
     allocation_barrier(vec_pages);
 
-    let r_ptr = alloc_region(mem, rid.0, page_count as u32, vec_pages);
+    let r_ptr = alloc_region(mem, rid.0, page_count as usize, vec_pages);
     r_ptr
 }
 
+fn upgrade_version_to_regions() {
+    let new_version = if uses_enhanced_orthogonal_persistence!() {
+        match crate::stable_mem::get_version() {
+            VERSION_STABLE_HEAP_NO_REGIONS => VERSION_STABLE_HEAP_REGIONS,
+            VERSION_GRAPH_COPY_NO_REGIONS => VERSION_GRAPH_COPY_REGIONS,
+            _ => unreachable!(),
+        }
+    } else {
+        LEGACY_VERSION_REGIONS
+    };
+    crate::stable_mem::set_version(new_version);
+}
+
 pub(crate) unsafe fn region_migration_from_no_stable_memory(mem: &mut M) {
     use crate::stable_mem::{get_version, grow, size, write};
     use meta_data::size::{PAGES_IN_BLOCK, PAGE_IN_BYTES};
 
-    assert!(get_version() == VERSION_STABLE_HEAP_NO_REGIONS);
+    if uses_enhanced_orthogonal_persistence!() {
+        assert!(
+            get_version() == VERSION_STABLE_HEAP_NO_REGIONS
+                || get_version() == VERSION_GRAPH_COPY_NO_REGIONS
+        );
+    } else {
+        assert!(get_version() == LEGACY_VERSION_NO_STABLE_MEMORY);
+    }
+
     assert_eq!(size(), 0);
 
     // pages required for meta_data (9/ 960KiB), much less than PAGES_IN_BLOCK (128/ 8MB) for a full block
@@ -592,7 +644,7 @@ pub(crate) unsafe fn region_migration_from_no_stable_memory(mem: &mut
     // Write magic header
     write_magic();
 
-    crate::stable_mem::set_version(VERSION_STABLE_HEAP_REGIONS);
+    upgrade_version_to_regions();
 
     // Region 0 -- classic API for stable memory, as a dedicated region.
     REGION_0 = region_new(mem);
@@ -669,7 +721,8 @@ pub(crate) unsafe fn region_migration_from_some_stable_memory(mem: &m
     // Temp for the head block, which we move to be physically last.
     // NB: no allocation_barrier is required: header_val is temporary and can be reclaimed by the next GC increment/run.
     // TODO: instead of allocating an 8MB blob, just stack-allocate a tmp page and zero page, and transfer/zero-init via the stack, using a loop.
-    let header_val = crate::memory::alloc_blob(mem, TAG_BLOB_B, crate::types::Bytes(header_len));
+    let header_val =
+        crate::memory::alloc_blob(mem, TAG_BLOB_B, crate::types::Bytes(header_len as usize));
     let header_blob = header_val.as_blob_mut();
     let header_bytes =
         core::slice::from_raw_parts_mut(header_blob.payload_addr(), header_len as usize);
@@ -681,7 +734,10 @@ pub(crate) unsafe fn region_migration_from_some_stable_memory(mem: &m
         header_bytes,
     );
 
-    crate::mem_utils::memzero_bytes((header_blob.payload_addr()) as usize, Bytes(header_len));
+    crate::mem_utils::memzero_bytes(
+        (header_blob.payload_addr()) as usize,
+        Bytes(header_len as usize),
+    );
 
     write(0, header_bytes); // Zero out first block, for region manager meta data.
 
@@ -716,7 +772,7 @@ pub(crate) unsafe fn region_migration_from_some_stable_memory(mem: &m
         meta_data::block_region_table::set(BlockId(i), Some((RegionId(0), rank, page_count)))
     }
 
-    crate::stable_mem::set_version(VERSION_STABLE_HEAP_REGIONS);
+    upgrade_version_to_regions();
 
     /* "Recover" the region data into a heap object. */
     REGION_0 = region_recover(mem, &RegionId(0));
@@ -768,10 +824,42 @@ pub(crate) unsafe fn region_migration_from_regions_plus(mem: &mut M)
 //
 // region manager migration/initialization, with pre-existing stable data.
 //
+#[classical_persistence]
+#[ic_mem_fn(ic_only)]
+pub(crate) unsafe fn region_init(mem: &mut M, use_stable_regions: usize) {
+    match crate::stable_mem::get_version() {
+        LEGACY_VERSION_NO_STABLE_MEMORY => {
+            assert!(crate::stable_mem::size() == 0);
+            if use_stable_regions != 0 {
+                region_migration_from_no_stable_memory(mem);
+                debug_assert!(meta_data::offset::FREE < BLOCK_BASE);
+                debug_assert!(BLOCK_BASE == meta_data::offset::BASE_LOW);
+            };
+        }
+        LEGACY_VERSION_SOME_STABLE_MEMORY => {
+            assert!(crate::stable_mem::size() > 0);
+            if use_stable_regions != 0 {
+                region_migration_from_some_stable_memory(mem);
+                debug_assert!(meta_data::offset::FREE < BLOCK_BASE);
+                debug_assert!(BLOCK_BASE == meta_data::offset::BASE_HIGH);
+            };
+        }
+        _ => {
+            region_migration_from_regions_plus(mem); //check format & recover region0
+            debug_assert!(meta_data::offset::FREE < BLOCK_BASE);
+            debug_assert!(
+                BLOCK_BASE == meta_data::offset::BASE_LOW
+                    || BLOCK_BASE == meta_data::offset::BASE_HIGH
+            );
+        }
+    }
+}
+
+#[enhanced_orthogonal_persistence]
 #[ic_mem_fn(ic_only)]
-pub(crate) unsafe fn region_init(mem: &mut M, use_stable_regions: u32) {
+pub(crate) unsafe fn region_init(mem: &mut M, use_stable_regions: usize) {
     match crate::stable_mem::get_version() {
-        VERSION_STABLE_HEAP_NO_REGIONS => {
+        VERSION_STABLE_HEAP_NO_REGIONS | VERSION_GRAPH_COPY_NO_REGIONS => {
             if use_stable_regions != 0 {
                 if crate::stable_mem::size() == 0 {
                     region_migration_from_no_stable_memory(mem);
@@ -784,7 +872,7 @@ pub(crate) unsafe fn region_init(mem: &mut M, use_stable_regions: u32
                 }
             }
         }
-        VERSION_STABLE_HEAP_REGIONS => {
+        VERSION_STABLE_HEAP_REGIONS | VERSION_GRAPH_COPY_REGIONS => {
             region_migration_from_regions_plus(mem); //check format & recover region0
             debug_assert!(meta_data::offset::FREE < BLOCK_BASE);
             debug_assert!(
@@ -799,7 +887,7 @@ pub(crate) unsafe fn region_init(mem: &mut M, use_stable_regions: u32
 #[ic_mem_fn]
 pub unsafe fn region_size(_mem: &mut M, r: Value) -> u64 {
     let r = r.as_region();
-    (*r).page_count.into()
+    (*r).page_count as u64
 }
 
 #[ic_mem_fn]
@@ -811,7 +899,7 @@ pub unsafe fn region_grow(mem: &mut M, r: Value, new_pages: u64) -> u
     let r = r.as_region();
     let old_page_count = (*r).page_count;
 
-    if new_pages > (max_pages_in_region - old_page_count) as u64 {
+    if new_pages > (max_pages_in_region as usize - old_page_count) as u64 {
         return u64::MAX;
     }
 
@@ -819,8 +907,9 @@ pub unsafe fn region_grow(mem: &mut M, r: Value, new_pages: u64) -> u
 
     let new_pages_ = new_pages as u32;
 
-    let old_block_count = (old_page_count + (PAGES_IN_BLOCK - 1)) / PAGES_IN_BLOCK;
-    let new_block_count = (old_page_count + new_pages_ + (PAGES_IN_BLOCK - 1)) / PAGES_IN_BLOCK;
+    let old_block_count = (old_page_count as u32 + (PAGES_IN_BLOCK - 1)) / PAGES_IN_BLOCK;
+    let new_block_count =
+        (old_page_count as u32 + new_pages_ + (PAGES_IN_BLOCK - 1)) / PAGES_IN_BLOCK;
     let inc_block_count = new_block_count - old_block_count;
 
     // Determine the required total number of allocated blocks,
@@ -848,14 +937,14 @@ pub unsafe fn region_grow(mem: &mut M, r: Value, new_pages: u64) -> u
         let r_id = RegionId::from_id(r.read_id64());
 
         // Increase both:
-        (*r).page_count += new_pages_;
+        (*r).page_count += new_pages_ as usize;
         if old_block_count > 0 {
             let last_block_rank = (old_block_count - 1) as u16;
             let last_block_id =
                 AccessVector((*r).vec_pages.as_blob_mut()).get_ith_block_id(last_block_rank as u32);
-            debug_assert_eq!((*r).page_count, old_page_count + new_pages_);
+            debug_assert_eq!((*r).page_count, old_page_count + new_pages_ as usize);
             let last_page_count =
-                block_page_count(last_block_rank, new_block_count, (*r).page_count);
+                block_page_count(last_block_rank, new_block_count, (*r).page_count as u32);
             let assoc = Some((r_id, last_block_rank, last_page_count));
             meta_data::block_region_table::set(last_block_id, assoc);
         }
@@ -864,7 +953,7 @@ pub unsafe fn region_grow(mem: &mut M, r: Value, new_pages: u64) -> u
     let new_vec_pages = alloc_blob(
         mem,
         TAG_BLOB_B,
-        Bytes(new_block_count * meta_data::bytes_of::() as u32),
+        Bytes(new_block_count as usize * meta_data::bytes_of::() as usize),
     );
     let old_vec_byte_count = old_block_count * meta_data::bytes_of::() as u32;
 
@@ -872,7 +961,7 @@ pub unsafe fn region_grow(mem: &mut M, r: Value, new_pages: u64) -> u
     crate::mem_utils::memcpy_bytes(
         new_vec_pages.as_blob_mut().payload_addr() as usize,
         (*r).vec_pages.as_blob().payload_const() as usize,
-        Bytes(old_vec_byte_count),
+        Bytes(old_vec_byte_count as usize),
     );
 
     let new_pages = AccessVector::from_value(&new_vec_pages);
@@ -888,7 +977,7 @@ pub unsafe fn region_grow(mem: &mut M, r: Value, new_pages: u64) -> u
         let block_id: u16 = (old_total_blocks + rel_i as u32) as u16;
 
         // Update stable memory with new association.
-        let block_page_count = block_page_count(i as u16, new_block_count, (*r).page_count);
+        let block_page_count = block_page_count(i as u16, new_block_count, (*r).page_count as u32);
         let assoc = Some((RegionId::from_id(r.read_id64()), i as u16, block_page_count));
         meta_data::block_region_table::set(BlockId(block_id), assoc);
 
@@ -897,7 +986,7 @@ pub unsafe fn region_grow(mem: &mut M, r: Value, new_pages: u64) -> u
 
     allocation_barrier(new_vec_pages);
     write_with_barrier(mem, &mut (*r).vec_pages, new_vec_pages);
-    old_page_count.into()
+    old_page_count as u64
 }
 
 pub(crate) unsafe fn region_load(_mem: &mut M, r: Value, offset: u64, dst: &mut [u8]) {
@@ -1049,16 +1138,16 @@ pub(crate) unsafe fn region_load_blob(
     mem: &mut M,
     r: Value,
     offset: u64,
-    len: u32,
+    len: usize,
 ) -> Value {
-    let blob_val = crate::memory::alloc_blob(mem, TAG_BLOB_B, crate::types::Bytes(len));
+    let blob_val = crate::memory::alloc_blob(mem, TAG_BLOB_B, crate::types::Bytes(len as usize));
     let blob = blob_val.as_blob_mut();
 
-    if len < (isize::MAX as u32) {
+    if len < (isize::MAX as usize) {
         let bytes: &mut [u8] = core::slice::from_raw_parts_mut(blob.payload_addr(), len as usize);
         region_load(mem, r, offset, bytes);
     } else {
-        assert!((len / 2) < isize::MAX as u32);
+        assert!((len / 2) < isize::MAX as usize);
         let bytes_low: &mut [u8] =
             core::slice::from_raw_parts_mut(blob.payload_addr(), (len / 2) as usize);
         region_load(mem, r, offset, bytes_low);
@@ -1114,11 +1203,11 @@ pub(crate) unsafe fn region_store_blob(mem: &mut M, r: Value, offset:
     let blob = blob.as_blob();
     let len = blob.len().0;
     let bytes = blob.payload_const();
-    if len < (isize::MAX as u32) {
+    if len < (isize::MAX as usize) {
         let bytes: &[u8] = core::slice::from_raw_parts(bytes, len as usize);
         region_store(mem, r, offset, bytes);
     } else {
-        assert!((len / 2) < isize::MAX as u32);
+        assert!((len / 2) < isize::MAX as usize);
         let bytes_low: &[u8] = core::slice::from_raw_parts(bytes, (len / 2) as usize);
         region_store(mem, r, offset, bytes_low);
         let bytes_high: &[u8] =
diff --git a/rts/motoko-rts/src/stabilization.rs b/rts/motoko-rts/src/stabilization.rs
new file mode 100644
index 00000000000..8840dd0e23d
--- /dev/null
+++ b/rts/motoko-rts/src/stabilization.rs
@@ -0,0 +1,88 @@
+//! Graph-copy-based stabilization on upgrades, serializing the entire stable object graph into
+//! stable memory by using a defined long-term stable storage format.
+//!
+//! This is to support potentially radical changes of the persistent main memory layout, e.g.
+//! introducing a new GC or rearranging persistent metadata. This also relies on precise value
+//! tagging to allow more advanced changes that require value metadata, e.g. specializing arrays for
+//! small element types or even downgrading to 32-bit heap layouts (provided that the amount of live
+//! data fits into a 32-bit memory).
+//!
+//! A memory compatibility check similar to enhanced orthogonal persistence has to be performed.
+//! For this purpose, the type table of the serialized object graph is also stored in stable memory
+//! and on upgrade, compared to the new program version.
+//!
+//! A versioned stable storage format even permits future evolutions of the graph copy algorithm.
+//!  
+//! See `GraphCopyStabilization.md` for the stable format specification and the employed algorithm.
+
+pub mod deserialization;
+pub mod graph_copy;
+pub mod layout;
+pub mod serialization;
+
+#[cfg(feature = "ic")]
+pub mod ic;
+
+use core::cmp::min;
+
+use crate::{
+    constants::KB,
+    rts_trap_with,
+    stable_mem::{self, ic0_stable64_write, PAGE_SIZE},
+};
+
+use self::layout::StableValue;
+
+extern "C" {
+    pub fn moc_stabilization_instruction_limit() -> u64;
+    pub fn moc_stable_memory_access_limit() -> u64;
+    fn ic0_performance_counter(number: u32) -> u64;
+}
+
+// Dummy value used for non-stable objects that are potentially reachable from
+// stable variables because of structural subtyping or `Any`-subtyping.
+// Must be a non-skewed value such that the GC also ignores this value.
+const DUMMY_VALUE: StableValue = StableValue::from_raw(0);
+
+/// Note: This is called incrementally in smaller chunks by the destabilization
+/// as it may otherwise exceed the instruction limit.
+fn clear_stable_memory(start: u64, length: u64) {
+    // Optimal point for the two cost functions, according to experimental measurements:
+    // * Smaller chunks cause more stable API calls that incur costs.
+    // * Larger chunks cause higher chunk zero-initialization costs.
+    const CHUNK_SIZE: usize = KB;
+    let empty_chunk = [0u8; CHUNK_SIZE];
+    let mut position = start;
+    let end = start + length;
+    while position < end {
+        let size = min(end - position, CHUNK_SIZE as u64);
+        unsafe {
+            ic0_stable64_write(position, &empty_chunk as *const u8 as u64, size);
+        }
+        position += size;
+    }
+}
+
+fn grant_stable_space(byte_size: u64) {
+    debug_assert!(byte_size < u64::MAX - PAGE_SIZE - 1);
+    let required_pages = (byte_size + PAGE_SIZE - 1) / PAGE_SIZE;
+    let available_pages = stable_memory_physical_size();
+    if required_pages > available_pages {
+        let additional_pages = required_pages - available_pages;
+        debug_assert_ne!(additional_pages, u64::MAX);
+        let result = stable_memory_physical_grow(additional_pages);
+        if result == u64::MAX {
+            unsafe {
+                rts_trap_with("Insufficient stable memory");
+            }
+        }
+    }
+}
+
+fn stable_memory_physical_size() -> u64 {
+    unsafe { stable_mem::ic0_stable64_size() }
+}
+
+fn stable_memory_physical_grow(additional_pages: u64) -> u64 {
+    unsafe { stable_mem::ic0_stable64_grow(additional_pages) }
+}
diff --git a/rts/motoko-rts/src/stabilization/deserialization.rs b/rts/motoko-rts/src/stabilization/deserialization.rs
new file mode 100644
index 00000000000..7cd2d4501d0
--- /dev/null
+++ b/rts/motoko-rts/src/stabilization/deserialization.rs
@@ -0,0 +1,222 @@
+mod scan_stack;
+pub mod stable_memory_access;
+
+use crate::{
+    constants::MB,
+    gc::incremental::array_slicing::slice_array,
+    memory::Memory,
+    stabilization::deserialization::scan_stack::STACK_EMPTY,
+    types::{FwdPtr, Tag, Value, TAG_ARRAY_SLICE_MIN, TAG_FWD_PTR},
+    visitor::visit_pointer_fields,
+};
+
+use self::{scan_stack::ScanStack, stable_memory_access::StableMemoryAccess};
+
+use super::{
+    clear_stable_memory,
+    graph_copy::{limit::ExecutionMonitor, GraphCopy},
+    layout::{deserialize, StableValue},
+};
+
+pub struct Deserialization {
+    from_space: StableMemoryAccess,
+    scan_stack: ScanStack,
+    stable_start: u64,
+    stable_size: u64,
+    stable_root: Option,
+    limit: ExecutionMonitor,
+    clear_position: u64,
+}
+
+/// Helper type to pass serialization context instead of closures.
+pub struct DeserializationContext<'a, M: Memory> {
+    pub deserialization: &'a mut Deserialization,
+    pub mem: &'a mut M,
+}
+
+impl<'a, M: Memory> DeserializationContext<'a, M> {
+    fn new(
+        deserialization: &'a mut Deserialization,
+        mem: &'a mut M,
+    ) -> DeserializationContext<'a, M> {
+        DeserializationContext {
+            deserialization,
+            mem,
+        }
+    }
+}
+
+/// Graph-copy-based deserialization.
+/// Usage:
+/// ```
+/// let deserialization = Deserialization::start(mem, stable_start, stable_size);
+/// while !deserialization.is_completed() {
+///     deserialization.copy_increment();
+/// }
+/// ```
+/// Note: The deserialized memory is cleared as final process, using an incremental
+/// mechanism to avoid instruction limit exceeding.
+impl Deserialization {
+    /// Start the deserialization, followed by a series of copy increments.
+    pub fn start(mem: &mut M, stable_start: u64, stable_size: u64) -> Deserialization {
+        let from_space = StableMemoryAccess::open(stable_start, stable_size);
+        let scan_stack = unsafe { ScanStack::new(mem) };
+        let limit = ExecutionMonitor::new();
+        let mut deserialization = Deserialization {
+            from_space,
+            scan_stack,
+            stable_start,
+            stable_size,
+            stable_root: None,
+            limit,
+            clear_position: stable_start,
+        };
+        deserialization.start(mem, StableValue::serialize(Value::from_ptr(0)));
+        deserialization
+    }
+
+    pub fn get_stable_root(&self) -> Value {
+        self.stable_root.unwrap()
+    }
+
+    unsafe fn scan_deserialized<
+        'a,
+        M: Memory,
+        F: Fn(&mut DeserializationContext<'a, M>, Value) -> Value,
+    >(
+        context: &mut DeserializationContext<'a, M>,
+        target_object: Value,
+        translate: &F,
+    ) {
+        debug_assert!(target_object.is_obj());
+        visit_pointer_fields(
+            context,
+            target_object.as_obj(),
+            target_object.tag(),
+            0,
+            |context, field_address| {
+                *field_address = translate(context, *field_address);
+            },
+            |context, _, array| {
+                let length = slice_array(array);
+                if (*array).header.tag >= TAG_ARRAY_SLICE_MIN {
+                    // Push the array back on the stack to visit the next array slice.
+                    context
+                        .deserialization
+                        .scan_stack
+                        .push(context.mem, target_object);
+                }
+                length
+            },
+        );
+    }
+
+    fn stable_end(&self) -> u64 {
+        self.stable_start.checked_add(self.stable_size).unwrap()
+    }
+
+    fn processed_memory(&self) -> u64 {
+        let deserialized_memory = unsafe { deserialized_size() as u64 };
+        debug_assert!(self.clear_position >= self.stable_start);
+        let cleared_memory = self.clear_position - self.stable_start;
+        deserialized_memory + cleared_memory
+    }
+}
+
+impl GraphCopy for Deserialization {
+    fn get_forward_address(&self, stable_object: StableValue) -> Option<Value> {
+        let address = stable_object.to_stable_address();
+        let tag = self.from_space.read::(address);
+        match tag {
+            TAG_FWD_PTR => {
+                let forward_object = self.from_space.read::(address);
+                Some(forward_object.fwd)
+            }
+            _ => None,
+        }
+    }
+
+    fn set_forward_address(&mut self, stable_object: StableValue, target: Value) {
+        let address = stable_object.to_stable_address();
+        let forward_object = FwdPtr {
+            tag: TAG_FWD_PTR,
+            fwd: target,
+        };
+        self.from_space.write(address, &forward_object);
+    }
+
+    fn copy(&mut self, mem: &mut M, stable_object: StableValue) -> Value {
+        unsafe {
+            let target = deserialize(mem, &mut self.from_space, stable_object);
+            if self.stable_root.is_none() {
+                self.stable_root = Some(target);
+            }
+            self.scan_stack.push(mem, target);
+            target
+        }
+    }
+
+    /// Note:
+    /// * The deserialized memory may contain free space at a partition end.
+    fn scan(&mut self, mem: &mut M) {
+        let target_object = unsafe { self.scan_stack.pop() };
+        debug_assert!(target_object != STACK_EMPTY);
+        unsafe {
+            Self::scan_deserialized(
+                &mut DeserializationContext::new(self, mem),
+                target_object,
+                &|context, original| {
+                    let old_value = StableValue::serialize(original);
+                    if original.is_non_null_ptr() {
+                        context.deserialization.evacuate(context.mem, old_value)
+                    } else {
+                        original
+                    }
+                },
+            );
+        }
+    }
+
+    fn scanning_completed(&self) -> bool {
+        unsafe { self.scan_stack.is_empty() }
+    }
+
+    fn cleanup_completed(&self) -> bool {
+        debug_assert!(self.scanning_completed());
+        debug_assert!(self.clear_position <= self.stable_end());
+        self.clear_position >= self.stable_end()
+    }
+
+    fn cleanup(&mut self) {
+        // Optimum value according to experimental measurements:
+        // Smallest chunk size that does not cause noticeable performance regression.
+        // The granularity is still small enough to meet the instruction limit.
+        const MAX_CHUNK_SIZE: u64 = MB as u64;
+        debug_assert!(!self.cleanup_completed());
+        let end = self.stable_end();
+        assert!(self.clear_position < end);
+        let remainder = end - self.clear_position;
+        let chunk = core::cmp::min(MAX_CHUNK_SIZE, remainder);
+        clear_stable_memory(self.clear_position, chunk);
+        self.clear_position += chunk;
+    }
+
+    fn time_over(&mut self) -> bool {
+        self.limit.is_exceeded(self.processed_memory())
+    }
+
+    fn reset_time(&mut self) {
+        self.limit.reset(self.processed_memory());
+    }
+}
+
+#[cfg(feature = "ic")]
+unsafe fn deserialized_size() -> usize {
+    crate::memory::ic::partitioned_memory::get_heap_size().as_usize()
+}
+
+// Injection point for RTS unit testing.
+#[cfg(not(feature = "ic"))]
+extern "C" {
+    fn deserialized_size() -> usize;
+}
diff --git a/rts/motoko-rts/src/stabilization/deserialization/scan_stack.rs b/rts/motoko-rts/src/stabilization/deserialization/scan_stack.rs
new file mode 100644
index 00000000000..d426cc8dd51
--- /dev/null
+++ b/rts/motoko-rts/src/stabilization/deserialization/scan_stack.rs
@@ -0,0 +1,118 @@
+//! In-heap extendable scan stack to remember the deserialized objects,
+//! that still need to be scanned by Cheney's algorithm.
+//!
+//! This data structure is necessary for the partitioned heap used by the
+//! incremental GC where no linear scanning is possible because allocations
+//! addresses are not always montonically growing, not even on an empty heap.
+//! One example, is a large object that is allocated in a higher partition,
+//! while subsequently allocated normal-sized objects are placed in lower
+//! partitions.
+//!
+//! The scan stack cannot grow contiguously as new objects can be allocated
+//! during the deserialization. This is why the stack is represented as
+//! multiple tables.
+//!
+//! Doubly linked list of stack tables, each containing a series of entries.
+//! A table is represented as a blob with the following internal layout:
+//!
+//! ┌──────────┬─────────┬──────────┬─────────┬──────────────┬────────┐
+//! │ previous │   next  | entry[0] |  ...    | entry[top-1] | (free) |
+//! └──────────┴─────────┴──────────┴─────────┴──────────────┴────────┘
+//!
+//! The list is doubly linked for the following purpose:
+//! * `previous` to return to the previous table with preceding entries.
+//! * `next` avoid repeated allocations when the stack shrinks and regrows.
+//!
+//! Whenever a table is full and an entry needs to be pushed on the stack,
+//! a new stack table is allocated and linked, unless there already exists
+//! a next table. Only the last table can have free entry space.
+//!
+//! NOTES:
+//! * The tables are blobs, as their entries need not be analyzed by the
+//! destabilization or the GC.
+//! * The stack tables become garbage after a GC run and can be reclaimed.
+//!
+//! The design is mostly identical to the incremental GC's mark stack
+//! TODO: Eliminate code duplication by unifying the stack implementation for
+//! both cases.
+
+use core::ptr::null_mut;
+
+use crate::memory::{alloc_blob, Memory};
+use crate::types::{size_of, Blob, Value, TAG_BLOB_B};
+
+/// Scan stack: a doubly linked list of in-heap stack tables.
+pub struct ScanStack {
+    last: *mut StackTable, // last table in the list; the only one with free space
+    top: usize, // index of next free entry in the last stack table
+}
+
+pub const STACK_TABLE_CAPACITY: usize = 1018;
+
+/// Sentinel returned by `pop` when the stack is empty.
+pub const STACK_EMPTY: Value = Value::from_ptr(0);
+
+#[repr(C)]
+struct StackTable {
+    pub header: Blob, // tables live in the heap as blobs
+    pub previous: *mut StackTable,
+    pub next: *mut StackTable,
+    pub entries: [Value; STACK_TABLE_CAPACITY],
+}
+
+impl ScanStack {
+    /// Create an empty scan stack with one pre-allocated stack table.
+    pub unsafe fn new<M: Memory>(mem: &mut M) -> ScanStack {
+        let table = Self::new_table(mem, null_mut());
+        ScanStack {
+            last: table,
+            top: 0,
+        }
+    }
+
+    /// Push `value` on the stack. Allocates a new stack table when the last
+    /// table is full, unless a spare `next` table already exists.
+    pub unsafe fn push<M: Memory>(&mut self, mem: &mut M, value: Value) {
+        debug_assert!(value != STACK_EMPTY);
+        debug_assert!(self.last != null_mut());
+        if self.top == STACK_TABLE_CAPACITY {
+            if (*self.last).next == null_mut() {
+                self.last = Self::new_table(mem, self.last);
+            } else {
+                // Reuse the spare table retained from an earlier shrink.
+                self.last = (*self.last).next;
+            }
+            self.top = 0;
+        }
+        debug_assert!(self.top < STACK_TABLE_CAPACITY);
+        (*self.last).entries[self.top] = value;
+        self.top += 1;
+    }
+
+    /// Returns the sentinel `STACK_EMPTY` for an empty stack.
+    pub unsafe fn pop(&mut self) -> Value {
+        debug_assert!(self.last != null_mut());
+        if self.top == 0 {
+            if (*self.last).previous == null_mut() {
+                return STACK_EMPTY;
+            }
+            self.last = (*self.last).previous;
+            self.top = STACK_TABLE_CAPACITY;
+        }
+        debug_assert!(self.top > 0);
+        self.top -= 1;
+        debug_assert!(self.top < STACK_TABLE_CAPACITY);
+        (*self.last).entries[self.top]
+    }
+
+    /// True if no entries remain on the stack.
+    pub unsafe fn is_empty(&self) -> bool {
+        debug_assert!(self.last != null_mut());
+        self.top == 0 && (*self.last).previous == null_mut()
+    }
+
+    /// Allocate a new stack table as a heap blob and link it after `previous`.
+    unsafe fn new_table<M: Memory>(mem: &mut M, previous: *mut StackTable) -> *mut StackTable {
+        // No post allocation barrier as this RTS-internal blob will be collected by the GC.
+        let table = alloc_blob(mem, TAG_BLOB_B, size_of::<StackTable>().to_bytes()).as_blob_mut()
+            as *mut StackTable;
+        (*table).previous = previous;
+        (*table).next = null_mut();
+        if previous != null_mut() {
+            (*previous).next = table;
+        }
+        table
+    }
+}
diff --git a/rts/motoko-rts/src/stabilization/deserialization/stable_memory_access.rs b/rts/motoko-rts/src/stabilization/deserialization/stable_memory_access.rs
new file mode 100644
index 00000000000..60ec3a7d88b
--- /dev/null
+++ b/rts/motoko-rts/src/stabilization/deserialization/stable_memory_access.rs
@@ -0,0 +1,58 @@
+//! Random read/write access to stable memory.
+//! Supporting Cheney's from-space in stable memory.
+
+use core::mem::{size_of, MaybeUninit};
+
+use crate::stable_mem::{ic0_stable64_read, ic0_stable64_write};
+
+/// Random access to stable memory.
+/// Used for the from-space during destabilization.
+pub struct StableMemoryAccess {
+    base_address: u64,
+    length: u64,
+}
+
+impl StableMemoryAccess {
+    /// Open a window of `length` bytes of stable memory starting at `base_address`.
+    /// All offsets passed to the read/write functions are relative to `base_address`.
+    pub fn open(base_address: u64, length: u64) -> StableMemoryAccess {
+        StableMemoryAccess {
+            base_address,
+            length,
+        }
+    }
+
+    /// Read a `T` from relative offset `source_offset`.
+    /// NOTE(review): `T` must be a plain-data type that is valid for any bit
+    /// pattern read from stable memory.
+    pub fn read<T>(&self, source_offset: u64) -> T {
+        let length = size_of::<T>();
+        let mut value = MaybeUninit::<T>::uninit();
+        let value_address = value.as_mut_ptr() as usize;
+        self.raw_read(source_offset, value_address, length);
+        // Safe: the buffer has been fully initialized by `raw_read` above.
+        unsafe { value.assume_init() }
+    }
+
+    /// Copy `length` bytes from relative offset `source_offset` into main memory
+    /// at `target_address`.
+    pub fn raw_read(&self, source_offset: u64, target_address: usize, length: usize) {
+        debug_assert!(source_offset + length as u64 <= self.length);
+        unsafe {
+            ic0_stable64_read(
+                target_address as u64,
+                self.base_address + source_offset,
+                length as u64,
+            );
+        }
+    }
+
+    /// Write `value` at relative offset `target_offset`.
+    pub fn write<T>(&mut self, target_offset: u64, value: &T) {
+        let length = size_of::<T>();
+        let value_address = value as *const T as usize;
+        self.raw_write(target_offset, value_address, length);
+    }
+
+    /// Copy `length` bytes from main memory at `source_address` to relative
+    /// offset `target_offset`.
+    pub fn raw_write(&mut self, target_offset: u64, source_address: usize, length: usize) {
+        debug_assert!(target_offset + length as u64 <= self.length);
+        unsafe {
+            ic0_stable64_write(
+                self.base_address + target_offset,
+                source_address as u64,
+                length as u64,
+            );
+        }
+    }
+}
diff --git a/rts/motoko-rts/src/stabilization/graph_copy.rs b/rts/motoko-rts/src/stabilization/graph_copy.rs
new file mode 100644
index 00000000000..288b02e60e7
--- /dev/null
+++ b/rts/motoko-rts/src/stabilization/graph_copy.rs
@@ -0,0 +1,112 @@
+use crate::memory::Memory;
+
+pub mod limit;
+
+/// Generic graph copy from main memory (from-space) to stable memory (to-space).
+/// The direction of copying is fixed but the memory layout used in the from-space
+/// and the to-space flips when switching between serialization and deserialization.
+/// `S`: Source address type (from-space, main memory).
+/// `T`: Target address type (to-space, stable memory).
+/// `P`: Pointer encoding type (e.g. `u32` or `u64`).
+/// During serialization:
+/// * Main memory = main memory layout, S = Value.
+/// * Stable memory = stable memory layout, T = StableMemoryAddress.
+/// During deserialization:
+/// * Main memory = stable memory layout, S = StableMemoryAddress.
+/// * Stable memory = main memory layout, T = Value.
+pub trait GraphCopy<S: Copy, T: Copy, P: Copy> {
+    /// Start the entire graph copy algorithm: Copy the object graph reachable from the `root` pointer.
+    /// Use this as follows:
+    /// ```
+    /// copy_algorithm.start();
+    /// while !copy_algorithm.is_completed() {
+    ///     copy_algorithm.copy_increment();
+    /// }
+    /// ```
+    fn start<M: Memory>(&mut self, mem: &mut M, root: S) {
+        self.evacuate(mem, root);
+    }
+
+    /// Determine whether the scanning algorithm is completed,
+    /// i.e. all necessary objects have been scanned and copied.
+    fn scanning_completed(&self) -> bool;
+
+    /// Determine whether potential final cleanup work has been completed.
+    fn cleanup_completed(&self) -> bool {
+        self.scanning_completed()
+    }
+
+    /// Perform optional cleanup work after completed scanning and copying.
+    /// This work can be done in incremental steps.
+    fn cleanup(&mut self) {}
+
+    /// Determine whether the entire graph copy algorithm has been completed.
+    /// This includes an incremental copying and an incremental cleanup phase.
+    fn is_completed(&self) -> bool {
+        self.scanning_completed() && self.cleanup_completed()
+    }
+
+    /// Complete the entire graph copy algorithm.
+    fn complete(&mut self) {}
+
+    /// Copy reachable objects in a time-bounded work step with a synthetic time bound.
+    /// This allows spreading the incremental graph copy work over multiple
+    /// increments in multiple IC messages.
+    fn copy_increment<M: Memory>(&mut self, mem: &mut M) {
+        self.reset_time();
+        while !self.scanning_completed() && !self.time_over() {
+            self.scan(mem);
+        }
+        if self.scanning_completed() {
+            while !self.cleanup_completed() && !self.time_over() {
+                self.cleanup();
+            }
+            if self.cleanup_completed() {
+                self.complete();
+            }
+        }
+    }
+
+    /// Reset the time at the beginning of a new copy increment.
+    fn reset_time(&mut self);
+
+    /// Determine whether the time of the copy increment has been exceeded.
+    fn time_over(&mut self) -> bool;
+
+    /// Lazy evacuation of a single object.
+    /// Triggered for each pointer that is patched in the `scan()` function.
+    /// Determines whether the object has already been copied before, and if not,
+    /// copies it to the target space.
+    /// Returns the new target address of the object.
+    fn evacuate<M: Memory>(&mut self, mem: &mut M, object: S) -> T {
+        match self.get_forward_address(object) {
+            Some(target) => target,
+            None => {
+                let target = self.copy(mem, object);
+                self.set_forward_address(object, target);
+                target
+            }
+        }
+    }
+
+    /// Check if the object has been forwarded.
+    /// Returns `None` if not forwarded, or otherwise, the new target address.
+    fn get_forward_address(&self, object: S) -> Option<T>;
+
+    /// Mark the object as forwarded and record its new target address.
+    fn set_forward_address(&mut self, object: S, target: T);
+
+    /// Allocate the object in the to-space by bumping the `free` pointer.
+    /// Copy its content to that target location using the encoding of the target layout.
+    /// Notes:
+    /// * The pointer values in the field are retained as source addresses.
+    /// * The source and target layout must use the same size for addresses, e.g. 32-bit.
+    /// * The allocator must be contiguously growing. Free space must be inserted when the
+    ///   allocator uses internal fragmentation, e.g. for the partitioned heap.
+    fn copy<M: Memory>(&mut self, mem: &mut M, object: S) -> T;
+
+    /// Read an object at the `scan` position in the to-space, and patch all the pointer fields
+    /// by translating the source pointer to the corresponding new target pointer by calling
+    /// `evacuate()`.
+    fn scan<M: Memory>(&mut self, mem: &mut M);
+}
diff --git a/rts/motoko-rts/src/stabilization/graph_copy/limit.rs b/rts/motoko-rts/src/stabilization/graph_copy/limit.rs
new file mode 100644
index 00000000000..864aa89f419
--- /dev/null
+++ b/rts/motoko-rts/src/stabilization/graph_copy/limit.rs
@@ -0,0 +1,122 @@
+use crate::{
+    constants::KB,
+    stabilization::{
+        ic0_performance_counter, moc_stabilization_instruction_limit,
+        moc_stable_memory_access_limit,
+    },
+};
+
+/// Graph copy limits depending on the IC message type.
+/// Explicit stabilization and destabilization increments run as update messages
+/// with lower limits, while the graph copy during the actual upgrade can use
+/// higher limits.
+#[derive(Clone, Copy)]
+pub struct ExecutionLimits {
+    /// Limit of the instructions executed during a graph copy increment.
+    instruction_limit: u64,
+    /// Limit of read or written stable memory during a graph copy increment.
+    stable_memory_access_limit: u64,
+}
+
+impl ExecutionLimits {
+    /// Determine the limits for the current IC message.
+    /// Both limits are obtained from externally provided hooks.
+    pub fn determine() -> ExecutionLimits {
+        ExecutionLimits {
+            instruction_limit: unsafe { moc_stabilization_instruction_limit() },
+            stable_memory_access_limit: unsafe { moc_stable_memory_access_limit() },
+        }
+    }
+}
+
+/// Execution monitor for the graph copy increment.
+/// Monitoring the message instruction counter and
+/// the amount of processed memory.
+/// The latter is relevant to avoid exceeding the limit
+/// of how much stable memory can be accessed in a message.
+/// Optimization: Avoiding frequent repeated calls to
+/// `ic0_performance_counter()` as this incurs 200
+/// instructions itself.
+/// Heuristic approach: The logic only occasionally
+/// synchronizes the actual instruction counter after a
+/// defined granularity of work:
+/// * A certain amount of memory has been processed:
+///     - During stabilization: Written stable memory.
+///     - During destabilization: Allocated main memory
+///       and cleared stable memory (during completion),
+///   or,
+/// * A certain number of `is_exceeded` calls has
+///   been made.
+/// Once the limit has been exceeded, the heuristics
+/// continuously returns exceeded until the monitoring
+/// is reset.
+pub struct ExecutionMonitor {
+    /// Limits depending on IC message type (upgrade or update).
+    limits: ExecutionLimits,
+    /// Instruction counter at the beginning of the measurement.
+    initial_instruction_counter: u64,
+    /// Amount of processed memory before the measurement.
+    initial_processed_memory: u64,
+    // Only used for sporadic synchronization heuristics:
+    /// Number of `is_exceeded` calls since the last instruction
+    /// counter synchronization.
+    call_counter: usize,
+    /// Amount of processed memory since the last instruction
+    /// counter synchronization.
+    last_processed: u64,
+    /// Denotes whether the limit has been exceeded.
+    exceeded: bool,
+}
+
+impl ExecutionMonitor {
+    /// Threshold on the number of `is_exceeded` calls since the
+    /// last instruction counter synchronization.
+    const CALL_THRESHOLD: usize = 1_000;
+    /// Threshold on the amount of processed memory since the last
+    /// instruction counter synchronization.
+    const MEMORY_THRESHOLD: u64 = 256 * KB as u64;
+
+    /// Start a fresh measurement from the current instruction counter.
+    pub fn new() -> ExecutionMonitor {
+        ExecutionMonitor {
+            limits: ExecutionLimits::determine(),
+            initial_instruction_counter: Self::instruction_counter(),
+            initial_processed_memory: 0,
+            call_counter: 0,
+            last_processed: 0,
+            exceeded: false,
+        }
+    }
+
+    /// Check whether either the stable memory access limit or the
+    /// instruction limit has been exceeded. The instruction counter
+    /// is only synchronized sporadically, see the struct documentation.
+    pub fn is_exceeded(&mut self, processed_memory: u64) -> bool {
+        debug_assert!(self.initial_processed_memory <= processed_memory);
+        // Check the memory limit.
+        if processed_memory - self.initial_processed_memory > self.limits.stable_memory_access_limit
+        {
+            return true;
+        }
+        // Check the instruction limit.
+        // Sporadic instruction counter synchronization, see above.
+        self.call_counter += 1;
+        if processed_memory >= self.last_processed.saturating_add(Self::MEMORY_THRESHOLD)
+            || self.call_counter >= Self::CALL_THRESHOLD
+        {
+            // Reset the heuristics counters.
+            self.call_counter = 0;
+            self.last_processed = processed_memory;
+            // Check actual instruction counter.
+            let current = Self::instruction_counter();
+            debug_assert!(self.initial_instruction_counter <= current);
+            let elapsed = current - self.initial_instruction_counter;
+            self.exceeded = elapsed > self.limits.instruction_limit;
+        }
+        self.exceeded
+    }
+
+    /// Restart the monitoring, measuring from the current instruction
+    /// counter and from the given amount of already processed memory.
+    pub fn reset(&mut self, processed_memory: u64) {
+        *self = Self::new();
+        self.initial_processed_memory = processed_memory;
+    }
+
+    /// Read the IC's per-message instruction counter.
+    fn instruction_counter() -> u64 {
+        unsafe { ic0_performance_counter(0) }
+    }
+}
diff --git a/rts/motoko-rts/src/stabilization/ic.rs b/rts/motoko-rts/src/stabilization/ic.rs
new file mode 100644
index 00000000000..05a926479da
--- /dev/null
+++ b/rts/motoko-rts/src/stabilization/ic.rs
@@ -0,0 +1,278 @@
+mod metadata;
+mod performance;
+
+use motoko_rts_macros::ic_mem_fn;
+
+use crate::{
+    gc::incremental::{is_gc_stopped, resume_gc, stop_gc},
+    memory::Memory,
+    persistence::{
+        compatibility::{memory_compatible, TypeDescriptor},
+        set_upgrade_instructions,
+    },
+    rts_trap_with,
+    stabilization::ic::metadata::StabilizationMetadata,
+    stable_mem::{self, moc_stable_mem_set_size, PAGE_SIZE},
+    types::Value,
+};
+
+use self::{metadata::UpgradeStatistics, performance::InstructionMeter};
+
+use super::graph_copy::GraphCopy;
+use super::{deserialization::Deserialization, serialization::Serialization};
+
+/// State of an ongoing incremental graph-copy-based stabilization.
+struct StabilizationState {
+    old_candid_data: Value,
+    old_type_offsets: Value,
+    completed: bool,
+    serialization: Serialization,
+    instruction_meter: InstructionMeter,
+}
+
+impl StabilizationState {
+    fn new(
+        serialization: Serialization,
+        old_candid_data: Value,
+        old_type_offsets: Value,
+    ) -> StabilizationState {
+        StabilizationState {
+            old_candid_data,
+            old_type_offsets,
+            completed: false,
+            serialization,
+            instruction_meter: InstructionMeter::new(),
+        }
+    }
+}
+
+static mut STABILIZATION_STATE: Option<StabilizationState> = None;
+
+/// Returns true if an incremental graph-copy-based stabilization has been started.
+#[no_mangle]
+pub unsafe extern "C" fn is_graph_stabilization_started() -> bool {
+    STABILIZATION_STATE.is_some()
+}
+
+/// Start the incremental graph-copy-based stabilization.
+/// This operation initiates the graph copy stabilization instead of enhanced orthogonal persistence.
+/// It is required before a series of stabilization increments can be run.
+/// `stable_actor`: Root object for stabilization containing all stable variables of the actor.
+/// `old_candid_data`: A blob encoding the Candid type as a table.
+/// `old_type_offsets`: A blob encoding the type offsets in the Candid type table.
+///   Type index 0 represents the stable actor object to be serialized.
+/// Note:
+/// - Once started, the heap is invalidated. All application messages must be blocked after this start.
+#[ic_mem_fn(ic_only)]
+pub unsafe fn start_graph_stabilization<M: Memory>(
+    mem: &mut M,
+    stable_actor: Value,
+    old_candid_data: Value,
+    old_type_offsets: Value,
+) {
+    assert!(STABILIZATION_STATE.is_none());
+    assert!(is_gc_stopped());
+    let stable_memory_pages = stable_mem::size(); // Backup the virtual size.
+    let serialized_data_start = stable_memory_pages * PAGE_SIZE;
+    let serialization = Serialization::start(mem, stable_actor, serialized_data_start);
+    STABILIZATION_STATE = Some(StabilizationState::new(
+        serialization,
+        old_candid_data,
+        old_type_offsets,
+    ));
+}
+
+/// Incremental graph-copy-based stabilization, serializing a limited amount of heap objects reachable
+/// from stable variables into stable memory.
+/// This function can be called multiple times before the upgrade of a large heap.
+/// The incrementality serves to support the graph-copy-based serialization of large heaps that do
+/// not fit into the upgrade message instruction limit.
+/// Returns true if the stabilization has been completed.
+/// Notes:
+/// - During stabilization, the heap is invalidated. Therefore and for consistent serialization,
+///   all application messages must be blocked during this process.
+/// - This operation only runs a limited number of instructions that may not yet complete the stabilization.
+///   The compiler may need to trigger more messages that run additional stabilization increments, before the
+///   stabilization has been completed and the actual canister upgrade is ready to be performed.
+/// Implementation:
+/// * Algorithm: Cheney's algorithm using main memory as from-space and stable memory as to-space.
+/// * Encoding: The from-space uses the main memory heap layout, while the to-space is encoded in
+///   the stable object graph layout (see `GraphCopyStabilization.md`).
+#[ic_mem_fn(ic_only)]
+pub unsafe fn graph_stabilization_increment<M: Memory>(mem: &mut M) -> bool {
+    let state = STABILIZATION_STATE.as_mut().unwrap();
+    if !state.completed {
+        assert!(is_gc_stopped());
+        state.instruction_meter.start();
+        state.serialization.copy_increment(mem);
+        state.instruction_meter.stop();
+        if state.serialization.is_completed() {
+            write_metadata();
+            state.completed = true;
+        }
+    }
+    state.completed
+}
+
+/// Persist the stabilization metadata (serialized data location and the old
+/// type descriptor) via `StabilizationMetadata::store` after completed serialization.
+unsafe fn write_metadata() {
+    let state = STABILIZATION_STATE.as_mut().unwrap();
+    assert!(state.serialization.is_completed());
+    state.instruction_meter.start();
+    let serialized_data_start = state.serialization.serialized_data_start();
+    let serialized_data_length = state.serialization.serialized_data_length();
+
+    let type_descriptor = TypeDescriptor::new(state.old_candid_data, state.old_type_offsets);
+    let metadata = StabilizationMetadata {
+        serialized_data_start,
+        serialized_data_length,
+        type_descriptor,
+    };
+    state.instruction_meter.stop();
+    metadata.store(&mut state.instruction_meter);
+}
+
+/// State of an ongoing incremental graph-copy-based destabilization.
+struct DestabilizationState {
+    deserialization: Deserialization,
+    stabilization_statistics: UpgradeStatistics,
+    completed: bool,
+    instruction_meter: InstructionMeter,
+}
+
+static mut DESTABILIZATION_STATE: Option<DestabilizationState> = None;
+
+/// Starts the graph-copy-based destabilization process.
+/// This requires that the deserialization is subsequently run and completed.
+/// Also checks whether the new program version is compatible to the stored state by comparing the type
+/// tables of both the old and the new program version.
+/// The check is identical to enhanced orthogonal persistence, except that the metadata is obtained from
+/// stable memory and not the persistent main memory.
+/// The parameters encode the type table of the new program version to which that data is to be upgraded.
+/// `new_candid_data`: A blob encoding the Candid type as a table.
+/// `new_type_offsets`: A blob encoding the type offsets in the Candid type table.
+///   Type index 0 represents the stable actor object to be serialized.
+/// Traps if the stable state is incompatible with the new program version and the upgrade is not
+/// possible.
+#[ic_mem_fn(ic_only)]
+pub unsafe fn start_graph_destabilization<M: Memory>(
+    mem: &mut M,
+    new_candid_data: Value,
+    new_type_offsets: Value,
+) {
+    assert!(DESTABILIZATION_STATE.is_none());
+
+    let mut instruction_meter = InstructionMeter::new();
+    instruction_meter.start();
+    let mut new_type_descriptor = TypeDescriptor::new(new_candid_data, new_type_offsets);
+    let (metadata, statistics) = StabilizationMetadata::load(mem);
+    let mut old_type_descriptor = metadata.type_descriptor;
+    if !memory_compatible(mem, &mut old_type_descriptor, &mut new_type_descriptor) {
+        rts_trap_with("Memory-incompatible program upgrade");
+    }
+    // Restore the virtual size.
+    moc_stable_mem_set_size(metadata.serialized_data_start / PAGE_SIZE);
+
+    // Stop the GC until the incremental graph destabilization has been completed.
+    stop_gc();
+
+    let deserialization = Deserialization::start(
+        mem,
+        metadata.serialized_data_start,
+        metadata.serialized_data_length,
+    );
+    instruction_meter.stop();
+    DESTABILIZATION_STATE = Some(DestabilizationState {
+        deserialization,
+        stabilization_statistics: statistics,
+        completed: false,
+        instruction_meter,
+    });
+}
+
+/// Incremental graph-copy-based destabilization, deserializing a limited amount of serialized data from
+/// stable memory to the heap.
+/// This function can be called multiple times after the upgrade of a large heap.
+/// The incrementality serves to support the graph-copy-based deserialization of large heaps that do not fit
+/// into the upgrade message instruction limit.
+/// Returns true if the destabilization has been completed.
+/// Notes:
+/// - The heap is only valid after completed destabilization. Therefore and for consistent deserialization,
+///   all application messages must be blocked until this is completed.
+/// - This operation only runs a limited number of instructions that may not yet complete the upgrade.
+///   The compiler may need to trigger more messages that run additional destabilization increments, before
+///   the destabilization is completed and the application code can resume its operation.
+/// Implementation:
+/// * Algorithm: Cheney's algorithm using stable memory as from-space and main memory as to-space.
+/// * Encoding: The from-space uses the stable memory layout, while the to-space is to be encoded in
+///   main memory layout (see `GraphCopyStabilization.md`).
+#[ic_mem_fn(ic_only)]
+pub unsafe fn graph_destabilization_increment<M: Memory>(mem: &mut M) -> bool {
+    let state = DESTABILIZATION_STATE
+        .as_mut()
+        .unwrap_or_else(|| rts_trap_with("No destabilization needed"));
+    if !state.completed {
+        assert!(is_gc_stopped());
+        state.instruction_meter.start();
+        state.deserialization.copy_increment(mem);
+        state.instruction_meter.stop();
+        if state.deserialization.is_completed() {
+            record_upgrade_costs();
+            state.completed = true;
+            memory_sanity_check(mem);
+        }
+    }
+    state.completed
+}
+
+/// Optional heap consistency check after completed destabilization.
+/// Only active with the `memory_check` feature; otherwise a no-op.
+unsafe fn memory_sanity_check<M: Memory>(_mem: &mut M) {
+    #[cfg(feature = "memory_check")]
+    {
+        use crate::gc::incremental::{
+            get_partitioned_heap,
+            sanity_checks::{check_memory, CheckerMode},
+        };
+
+        let state = DESTABILIZATION_STATE.as_mut().unwrap();
+        let unused_root = &mut Value::from_scalar(0) as *mut Value;
+        let roots = [
+            &mut state.deserialization.get_stable_root() as *mut Value,
+            unused_root,
+            unused_root,
+            unused_root,
+            unused_root,
+            unused_root,
+        ];
+        check_memory(
+            _mem,
+            get_partitioned_heap(),
+            roots,
+            CheckerMode::UpdateCompletion,
+        );
+    }
+}
+
+/// Record the total upgrade instruction costs (stabilization plus destabilization)
+/// via `set_upgrade_instructions`.
+unsafe fn record_upgrade_costs() {
+    let state = DESTABILIZATION_STATE.as_ref().unwrap();
+    let total_instructions = state.stabilization_statistics.stabilization_instructions
+        + state.instruction_meter.total_elapsed();
+    set_upgrade_instructions(total_instructions);
+}
+
+/// Returns the deserialized stable actor root after the completed destabilization.
+#[no_mangle]
+pub unsafe extern "C" fn get_graph_destabilized_actor() -> Value {
+    let state = DESTABILIZATION_STATE.as_ref().unwrap();
+    assert!(state.completed);
+    state.deserialization.get_stable_root()
+}
+
+/// Stop the GC before performing incremental graph-copy-based stabilization
+/// or destabilization.
+/// This is only a safe-guard since the compiler must not schedule the GC during stabilization
+/// and destabilization.
+#[no_mangle]
+pub unsafe extern "C" fn stop_gc_before_stabilization() {
+    stop_gc();
+}
+
+/// Start the GC after completed incremental graph-copy-based destabilization.
+#[no_mangle]
+pub unsafe extern "C" fn start_gc_after_destabilization() {
+    resume_gc();
+}
diff --git a/rts/motoko-rts/src/stabilization/ic/metadata.rs b/rts/motoko-rts/src/stabilization/ic/metadata.rs
new file mode 100644
index 00000000000..2ddcf99b5ad
--- /dev/null
+++ b/rts/motoko-rts/src/stabilization/ic/metadata.rs
@@ -0,0 +1,220 @@
+//! Graph-copy-based serialization format:
+//!
+//! (Very first word is zeroed and backed up in last page)
+//! -- Stable memory
+//! Raw stable memory or region data, size S in pages
+//! -- Stable variables
+//! Serialized data address N:
+//!   Serialized object graph, length L
+//!   (possible zero padding)
+//! Type descriptor address M:
+//!   Candid type table
+//!     Byte length (u64)
+//!     Data
+//!   Type offset table
+//!     Byte length (u64)
+//!     Data
+//!   (possible zero padding)
+//! -- Last physical page (metadata):
+//!   (zero padding to align at page end)
+//!   Upgrade statistics (instructions) (u64)
+//!   Serialized data address N (u64)
+//!   Serialized data length L (u64)
+//!   Type descriptor address M (u64)
+//!   First word of page 0
+//!   Version 3 or 4 (u32) (match with `VERSION_GRAPH_COPY_NO_REGIONS` and `VERSION_GRAPH_COPY_REGIONS` in `region.rs` and `compile.ml`).
+//! -- page end
+
+use crate::{
+    barriers::allocation_barrier,
+    memory::{alloc_blob, Memory},
+    persistence::compatibility::TypeDescriptor,
+    region::{
+        VERSION_GRAPH_COPY_NO_REGIONS, VERSION_GRAPH_COPY_REGIONS, VERSION_STABLE_HEAP_NO_REGIONS,
+        VERSION_STABLE_HEAP_REGIONS,
+    },
+    stabilization::{clear_stable_memory, grant_stable_space},
+    stable_mem::{
+        get_version, ic0_stable64_read, ic0_stable64_size, ic0_stable64_write, read_u32, read_u64,
+        set_version, write_u32, write_u64, PAGE_SIZE,
+    },
+    types::{size_of, Bytes, Tag, Value, TAG_BLOB_B},
+};
+
+use super::performance::InstructionMeter;
+
+#[repr(C)]
+#[derive(Default)]
+pub struct UpgradeStatistics {
+    pub stabilization_instructions: u64,
+}
+
+#[repr(C)]
+#[derive(Default)]
+struct LastPageRecord {
+    statistics: UpgradeStatistics,
+    serialized_data_address: u64,
+    serialized_data_length: u64,
+    type_descriptor_address: u64,
+    first_word_backup: u32,
+    version: u32,
+}
+
+pub struct StabilizationMetadata {
+    pub serialized_data_start: u64,
+    pub serialized_data_length: u64,
+    pub type_descriptor: TypeDescriptor,
+}
+
+impl StabilizationMetadata {
+    fn ensure_space(offset: u64, length: u64) {
+        grant_stable_space(offset + length);
+    }
+
+    fn write_length(offset: &mut u64, length: u64) {
+        Self::ensure_space(*offset, length as u64);
+        write_u64(*offset, length);
+        *offset += size_of::<u64>().to_bytes().as_usize() as u64;
+    }
+
+    fn write_blob(offset: &mut u64, value: Value) {
+        unsafe {
+            let length = value.as_blob().len().as_usize() as u64;
+            Self::write_length(offset, length);
+            Self::ensure_space(*offset, length as u64);
+            ic0_stable64_write(
+                *offset,
+                value.as_blob().payload_const() as u64,
+                length as u64,
+            );
+            *offset += length as u64;
+        }
+    }
+
+    fn align_page_start(offset: &mut u64) {
+        if *offset % PAGE_SIZE != 0 {
+            let remainder = PAGE_SIZE - *offset % PAGE_SIZE;
+            clear_stable_memory(*offset, remainder);
+            *offset += remainder;
+        }
+    }
+
+    fn save_type_descriptor(offset: &mut u64, descriptor: &TypeDescriptor) {
+        Self::write_blob(offset, descriptor.candid_data());
+        Self::write_blob(offset, descriptor.type_offsets());
+    }
+
+    fn read_length(offset: &mut u64) -> u64 {
+        let length = read_u64(*offset);
+        // Note: Do not use `types::size_of()` as it rounds to 64-bit words.
+        clear_stable_memory(*offset, core::mem::size_of::<u64>() as u64);
+        *offset += size_of::<u64>().to_bytes().as_usize() as u64;
+        length
+    }
+
+    fn read_blob<M: Memory>(mem: &mut M, tag: Tag, offset: &mut u64) -> Value {
+        let length = Self::read_length(offset);
+        unsafe {
+            let value = alloc_blob(mem, tag, Bytes(length as usize));
+            ic0_stable64_read(
+                value.as_blob_mut().payload_addr() as u64,
+                *offset,
+                length as u64,
+            );
+            clear_stable_memory(*offset, length as u64);
+            allocation_barrier(value);
+            *offset += length as u64;
+            value
+        }
+    }
+
+    fn load_type_descriptor<M: Memory>(mem: &mut M, offset: &mut u64) -> TypeDescriptor {
+        let candid_data = Self::read_blob(mem, TAG_BLOB_B, offset);
+        let type_offsets = Self::read_blob(mem, TAG_BLOB_B, offset);
+        TypeDescriptor::new(candid_data, type_offsets)
+    }
+
+    fn metadata_location() -> u64 {
+        let physical_pages = unsafe { ic0_stable64_size() };
+        assert!(physical_pages > 0);
+        let last_page_start = (physical_pages - 1) * PAGE_SIZE;
+        let size = size_of::<LastPageRecord>().to_bytes().as_usize() as u64;
+        assert!(size < PAGE_SIZE);
+        last_page_start + (PAGE_SIZE - size)
+    }
+
+    fn write_metadata(value: &LastPageRecord) {
+        let offset = Self::metadata_location();
+        let size = size_of::<LastPageRecord>().to_bytes().as_usize() as u64;
+        Self::ensure_space(offset, size);
+        unsafe {
+            ic0_stable64_write(offset, value as *const LastPageRecord as u64, size);
+        }
+    }
+
+    fn read_metadata() -> LastPageRecord {
+        let offset = Self::metadata_location();
+        let size = size_of::<LastPageRecord>().to_bytes().as_usize() as u64;
+        let mut value = LastPageRecord::default();
+        unsafe {
+            ic0_stable64_read(&mut value as *mut LastPageRecord as u64, offset, size);
+        }
+        value
+    }
+
+    fn clear_metadata() {
+        Self::write_metadata(&LastPageRecord::default());
+    }
+
+    pub fn store(&self, measurement: &mut InstructionMeter) {
+        measurement.start();
+        let mut offset = self.serialized_data_start + self.serialized_data_length;
+        Self::align_page_start(&mut offset);
+        let type_descriptor_address = offset;
+        Self::save_type_descriptor(&mut offset, &self.type_descriptor);
+        Self::align_page_start(&mut offset);
+        let first_word_backup = read_u32(0);
+        // Clear very first word that is backed up in the last page.
+        // This ensures compatibility with old legacy version 0 using no
+        // experimental stable memory and no regions.
+        write_u32(0, 0);
+        measurement.stop();
+        let statistics = UpgradeStatistics {
+            stabilization_instructions: measurement.total_elapsed(),
+        };
+        let last_page_record = LastPageRecord {
+            statistics,
+            serialized_data_address: self.serialized_data_start,
+            serialized_data_length: self.serialized_data_length,
+            type_descriptor_address,
+            first_word_backup,
+            version: Self::stabilization_version() as u32,
+        };
+        Self::write_metadata(&last_page_record);
+    }
+
+    pub fn load<M: Memory>(mem: &mut M) -> (StabilizationMetadata, UpgradeStatistics) {
+        let last_page_record = Self::read_metadata();
+        Self::clear_metadata();
+        let version = last_page_record.version as usize;
+        assert!(version == VERSION_GRAPH_COPY_NO_REGIONS || version == VERSION_GRAPH_COPY_REGIONS);
+        set_version(version);
+        write_u32(0, last_page_record.first_word_backup);
+        let mut offset = last_page_record.type_descriptor_address;
+        let type_descriptor = Self::load_type_descriptor(mem, &mut offset);
+        let metadata = StabilizationMetadata {
+            serialized_data_start: last_page_record.serialized_data_address,
+            serialized_data_length: last_page_record.serialized_data_length,
+            type_descriptor,
+        };
+        (metadata, last_page_record.statistics)
+    }
+
+    fn stabilization_version() -> usize {
+        match get_version() {
+            VERSION_STABLE_HEAP_NO_REGIONS => VERSION_GRAPH_COPY_NO_REGIONS,
+            VERSION_STABLE_HEAP_REGIONS => VERSION_GRAPH_COPY_REGIONS,
+            _ => unreachable!(),
+        }
+    }
+}
diff --git a/rts/motoko-rts/src/stabilization/ic/performance.rs b/rts/motoko-rts/src/stabilization/ic/performance.rs
new file mode 100644
index 00000000000..5f2a99ec74e
--- /dev/null
+++ b/rts/motoko-rts/src/stabilization/ic/performance.rs
@@ -0,0 +1,37 @@
+use crate::stabilization::ic0_performance_counter;
+
+pub struct InstructionMeter {
+    total_elapsed: u64,
+    start_offset: Option<u64>,
+}
+
+impl InstructionMeter {
+    pub fn new() -> InstructionMeter {
+        InstructionMeter {
+            total_elapsed: 0,
+            start_offset: None,
+        }
+    }
+
+    pub fn start(&mut self) {
+        assert!(self.start_offset.is_none());
+        self.start_offset = Some(Self::instruction_counter());
+    }
+
+    pub fn stop(&mut self) {
+        let start = self.start_offset.unwrap();
+        let stop = Self::instruction_counter();
+        debug_assert!(start <= stop);
+        self.total_elapsed += stop - start;
+        self.start_offset = None;
+    }
+
+    pub fn total_elapsed(&self) -> u64 {
+        assert!(self.start_offset.is_none());
+        self.total_elapsed
+    }
+
+    fn instruction_counter() -> u64 {
+        unsafe { ic0_performance_counter(0) }
+    }
+}
diff --git a/rts/motoko-rts/src/stabilization/layout.rs b/rts/motoko-rts/src/stabilization/layout.rs
new file mode 100644
index 00000000000..49a9e5d24b0
--- /dev/null
+++ b/rts/motoko-rts/src/stabilization/layout.rs
@@ -0,0 +1,447 @@
+//! The long-term layout definition of the stable format.
+//!
+//! The stable object graph resides in a linear stable memory space.
+//!
+//! Pointers are serialized as 64-bit skewed offsets in that space.
+//! The stable encoding of scalars is currently identical to the
+//! main memory encoding, requiring the precise scalar tag as metadata.
+//! If the main memory scalar representation changes in future, the
+//! values need to be explicitly translated to the expected stable format.
+//!
+//! Each object uses a `StableTag` as header and is followed by
+//! the object payload as outlined in the corresponding Rust structs.
+//! Some objects, such as `StableArray`, `StableObject`, `StableBlob`,
+//! and `StableBigNum` have a dynamic payload body in addition to a static
+//! header.
+//!
+//! Not all heap memory object types are stabilized because some
+//! of them are not stable types. New object types can be added
+//! with backwards compatibility but encoding changes to existing stable
+//! data types must be handled with extra care to ensure backwards compatibility.
+//!
+//! Note: For a potential downgrade to 32-bit, the 64-bit stable pointer
+//! offsets can be scaled down by a factor `8` during the destabilization
+//! such that they fit into 32-bit values during Cheney's graph-copy.
+
+use crate::{
+    barriers::allocation_barrier,
+    constants::WORD_SIZE,
+    memory::Memory,
+    rts_trap_with,
+    types::{
+        base_array_tag, size_of, Tag, Value, TAG_ARRAY_I, TAG_ARRAY_M, TAG_ARRAY_S,
+        TAG_ARRAY_SLICE_MIN, TAG_ARRAY_T, TAG_BIGINT, TAG_BITS64_F, TAG_BITS64_S, TAG_BITS64_U,
+        TAG_BLOB_A, TAG_BLOB_B, TAG_BLOB_P, TAG_BLOB_T, TAG_CONCAT, TAG_MUTBOX, TAG_OBJECT,
+        TAG_REGION, TAG_SOME, TAG_VARIANT, TRUE_VALUE,
+    },
+};
+
+use self::{
+    stable_array::StableArray, stable_bigint::StableBigInt, stable_bits64::StableBits64,
+    stable_blob::StableBlob, stable_concat::StableConcat, stable_mutbox::StableMutBox,
+    stable_object::StableObject, stable_region::StableRegion, stable_some::StableSome,
+    stable_variant::StableVariant,
+};
+
+use super::{
+    deserialization::stable_memory_access::StableMemoryAccess,
+    serialization::{
+        stable_memory_stream::{ScanStream, StableMemoryStream, WriteStream},
+        SerializationContext,
+    },
+};
+
+mod stable_array;
+mod stable_bigint;
+mod stable_bits64;
+mod stable_blob;
+mod stable_concat;
+mod stable_mutbox;
+mod stable_object;
+mod stable_region;
+mod stable_some;
+mod stable_variant;
+
+/// Different kinds of objects used in the stable format.
+#[repr(u64)]
+#[derive(Clone, Copy, PartialEq, Debug)]
+pub enum StableObjectKind {
+    ArrayImmutable = 1,
+    ArrayMutable = 2,
+    ArrayTuple = 3,
+    ArraySharedFunction = 4,
+    MutBox = 5,
+    Object = 6,
+    BlobBytes = 7,
+    BlobText = 8,
+    BlobPrincipal = 9,
+    BlobActor = 10,
+    Bits64Unsigned = 11,
+    Bits64Signed = 12,
+    Bits64Float = 13,
+    Region = 14,
+    Variant = 15,
+    Concat = 16,
+    BigInt = 17,
+    Some = 18,
+}
+
+#[repr(C)]
+pub struct StableTag(u64);
+
+impl StableObjectKind {
+    pub fn encode(&self) -> StableTag {
+        StableTag(*self as u64)
+    }
+}
+
+impl StableTag {
+    pub fn decode(&self) -> StableObjectKind {
+        const STABLE_TAG_ARRAY_IMMUTABLE: u64 = StableObjectKind::ArrayImmutable as u64;
+        const STABLE_TAG_ARRAY_MUTABLE: u64 = StableObjectKind::ArrayMutable as u64;
+        const STABLE_TAG_ARRAY_TUPLE: u64 = StableObjectKind::ArrayTuple as u64;
+        const STABLE_TAG_ARRAY_SHARED_FUNCTION: u64 = StableObjectKind::ArraySharedFunction as u64;
+        const STABLE_TAG_MUTBOX: u64 = StableObjectKind::MutBox as u64;
+        const STABLE_TAG_OBJECT: u64 = StableObjectKind::Object as u64;
+        const STABLE_TAG_BLOB_BYTES: u64 = StableObjectKind::BlobBytes as u64;
+        const STABLE_TAG_BLOB_TEXT: u64 = StableObjectKind::BlobText as u64;
+        const STABLE_TAG_BLOB_PRINCIPAL: u64 = StableObjectKind::BlobPrincipal as u64;
+        const STABLE_TAG_BLOB_ACTOR: u64 = StableObjectKind::BlobActor as u64;
+        const STABLE_TAG_BITS64_UNSIGNED: u64 = StableObjectKind::Bits64Unsigned as u64;
+        const STABLE_TAG_BITS64_SIGNED: u64 = StableObjectKind::Bits64Signed as u64;
+        const STABLE_TAG_BITS64_FLOAT: u64 = StableObjectKind::Bits64Float as u64;
+        const STABLE_TAG_REGION: u64 = StableObjectKind::Region as u64;
+        const STABLE_TAG_VARIANT: u64 = StableObjectKind::Variant as u64;
+        const STABLE_TAG_CONCAT: u64 = StableObjectKind::Concat as u64;
+        const STABLE_TAG_BIGINT: u64 = StableObjectKind::BigInt as u64;
+        const STABLE_TAG_SOME: u64 = StableObjectKind::Some as u64;
+        match self.0 {
+            STABLE_TAG_ARRAY_IMMUTABLE => StableObjectKind::ArrayImmutable,
+            STABLE_TAG_ARRAY_MUTABLE => StableObjectKind::ArrayMutable,
+            STABLE_TAG_ARRAY_TUPLE => StableObjectKind::ArrayTuple,
+            STABLE_TAG_ARRAY_SHARED_FUNCTION => StableObjectKind::ArraySharedFunction,
+            STABLE_TAG_MUTBOX => StableObjectKind::MutBox,
+            STABLE_TAG_OBJECT => StableObjectKind::Object,
+            STABLE_TAG_BLOB_BYTES => StableObjectKind::BlobBytes,
+            STABLE_TAG_BLOB_TEXT => StableObjectKind::BlobText,
+            STABLE_TAG_BLOB_PRINCIPAL => StableObjectKind::BlobPrincipal,
+            STABLE_TAG_BLOB_ACTOR => StableObjectKind::BlobActor,
+            STABLE_TAG_BITS64_UNSIGNED => StableObjectKind::Bits64Unsigned,
+            STABLE_TAG_BITS64_SIGNED => StableObjectKind::Bits64Signed,
+            STABLE_TAG_BITS64_FLOAT => StableObjectKind::Bits64Float,
+            STABLE_TAG_REGION => StableObjectKind::Region,
+            STABLE_TAG_VARIANT => StableObjectKind::Variant,
+            STABLE_TAG_CONCAT => StableObjectKind::Concat,
+            STABLE_TAG_BIGINT => StableObjectKind::BigInt,
+            STABLE_TAG_SOME => StableObjectKind::Some,
+            _ => unsafe { rts_trap_with("Invalid tag") },
+        }
+    }
+}
+
+impl StableObjectKind {
+    fn deserialize(tag: Tag) -> StableObjectKind {
+        match tag {
+            // During the marking phase of the incremental GC, the mutator can see
+            // array slice information in the object tag.
+            TAG_ARRAY_I | TAG_ARRAY_M | TAG_ARRAY_T | TAG_ARRAY_S | TAG_ARRAY_SLICE_MIN.. => {
+                match base_array_tag(tag) {
+                    TAG_ARRAY_I => StableObjectKind::ArrayImmutable,
+                    TAG_ARRAY_M => StableObjectKind::ArrayMutable,
+                    TAG_ARRAY_T => StableObjectKind::ArrayTuple,
+                    TAG_ARRAY_S => StableObjectKind::ArraySharedFunction,
+                    _ => unreachable!("invalid array tag"),
+                }
+            }
+            TAG_MUTBOX => StableObjectKind::MutBox,
+            TAG_OBJECT => StableObjectKind::Object,
+            TAG_BLOB_B => StableObjectKind::BlobBytes,
+            TAG_BLOB_T => StableObjectKind::BlobText,
+            TAG_BLOB_P => StableObjectKind::BlobPrincipal,
+            TAG_BLOB_A => StableObjectKind::BlobActor,
+            TAG_BITS64_U => StableObjectKind::Bits64Unsigned,
+            TAG_BITS64_S => StableObjectKind::Bits64Signed,
+            TAG_BITS64_F => StableObjectKind::Bits64Float,
+            TAG_REGION => StableObjectKind::Region,
+            TAG_VARIANT => StableObjectKind::Variant,
+            TAG_CONCAT => StableObjectKind::Concat,
+            TAG_BIGINT => StableObjectKind::BigInt,
+            TAG_SOME => StableObjectKind::Some,
+            _ => unreachable!("invalid tag"),
+        }
+    }
+}
+
+#[repr(C)]
+#[derive(Clone, Copy, PartialEq)]
+pub struct StableValue(u64);
+
+impl StableValue {
+    fn is_ptr(&self) -> bool {
+        self.0 & 0b1 == 1 && self.0 != TRUE_VALUE as u64
+    }
+
+    fn skew(address: u64) -> u64 {
+        address.wrapping_sub(1)
+    }
+
+    fn unskew(pointer: u64) -> u64 {
+        debug_assert!(Self::from_raw(pointer).is_ptr());
+        pointer.wrapping_add(1)
+    }
+
+    pub const fn from_raw(value: u64) -> Self {
+        StableValue(value)
+    }
+
+    pub fn from_stable_address(address: u64) -> Self {
+        debug_assert_eq!(address % WORD_SIZE as u64, 0);
+        StableValue(Self::skew(address))
+    }
+
+    pub fn to_stable_address(&self) -> u64 {
+        Self::unskew(self.0)
+    }
+
+    pub fn payload_address(&self) -> u64 {
+        self.to_stable_address() + size_of::<StableTag>().to_bytes().as_usize() as u64
+    }
+
+    pub fn serialize(value: Value) -> Self {
+        StableValue(value.get_raw() as u64)
+    }
+
+    pub fn deserialize(&self) -> Value {
+        Value::from_raw(self.0 as usize)
+    }
+}
+
+/// Scan the static part of the object.
+trait StaticScanner<T> {
+    // Updates potential pointers in the static part of the object.
+    // Returns true if values have been updated.
+    fn update_pointers<C, F: Fn(&mut C, T) -> T>(
+        &mut self,
+        _context: &mut C,
+        _translate: &F,
+    ) -> bool {
+        false
+    }
+}
+
+pub trait StableToSpace {
+    fn to_space(&mut self) -> &mut StableMemoryStream;
+}
+
+trait Serializer<T>
+where
+    Self: Sized + StaticScanner<StableValue>,
+{
+    unsafe fn serialize_static_part(
+        stable_memory: &mut StableMemoryStream,
+        main_object: *mut T,
+    ) -> Self;
+    unsafe fn serialize_dynamic_part(
+        _stable_memory: &mut StableMemoryStream,
+        _main_object: *mut T,
+    ) {
+    }
+
+    unsafe fn serialize(stable_memory: &mut StableMemoryStream, main_object: Value) {
+        let stable_tag = StableObjectKind::deserialize(main_object.tag()).encode();
+        let main_object = main_object.as_obj() as *mut T;
+        stable_memory.write(&stable_tag);
+        unsafe {
+            let static_part = Self::serialize_static_part(stable_memory, main_object);
+            stable_memory.write(&static_part);
+            Self::serialize_dynamic_part(stable_memory, main_object);
+        }
+    }
+
+    fn scan_serialized<
+        'a,
+        M,
+        F: Fn(&mut SerializationContext<'a, M>, StableValue) -> StableValue,
+    >(
+        context: &mut SerializationContext<'a, M>,
+        translate: &F,
+    ) {
+        let mut static_part = context.serialization.to_space().read::<Self>();
+        if static_part.update_pointers(context, translate) {
+            context.serialization.to_space().update(&static_part);
+        }
+        static_part.scan_serialized_dynamic(context, translate);
+    }
+
+    fn scan_serialized_dynamic<
+        'a,
+        M,
+        F: Fn(&mut SerializationContext<'a, M>, StableValue) -> StableValue,
+    >(
+        &self,
+        _context: &mut SerializationContext<'a, M>,
+        _translate: &F,
+    ) {
+    }
+
+    unsafe fn allocate_deserialized<M: Memory>(
+        &self,
+        main_memory: &mut M,
+        _object_kind: StableObjectKind,
+    ) -> Value {
+        main_memory.alloc_words(size_of::<T>())
+    }
+
+    unsafe fn deserialize_static_part(&self, target_object: *mut T, object_kind: StableObjectKind);
+
+    unsafe fn deserialize_dynamic_part<M: Memory>(
+        &self,
+        _main_memory: &mut M,
+        _stable_memory: &StableMemoryAccess,
+        _stable_object: StableValue,
+        _target_object: *mut T,
+    ) {
+    }
+
+    unsafe fn deserialize<M: Memory>(
+        main_memory: &mut M,
+        stable_memory: &StableMemoryAccess,
+        stable_object: StableValue,
+        object_kind: StableObjectKind,
+    ) -> Value {
+        let stable_address = stable_object.payload_address();
+        let stable_static_part = stable_memory.read::<Self>(stable_address);
+        let target = stable_static_part.allocate_deserialized(main_memory, object_kind);
+        let target_object = target.get_ptr() as *mut T;
+        stable_static_part.deserialize_static_part(target_object, object_kind);
+        stable_static_part.deserialize_dynamic_part(
+            main_memory,
+            stable_memory,
+            stable_object,
+            target_object,
+        );
+        allocation_barrier(target)
+    }
+}
+
+pub fn round_to_u64(length: u64) -> u64 {
+    let alignment = size_of::<u64>().to_bytes().as_usize() as u64;
+    (length + alignment - 1) / alignment * alignment
+}
+
+fn write_padding_u64(stable_memory: &mut StableMemoryStream, byte_length: usize) {
+    let rounded_length = round_to_u64(byte_length as u64);
+    let padding = rounded_length - byte_length as u64;
+    for _ in 0..padding {
+        stable_memory.write(&0u8);
+    }
+}
+
+pub fn scan_serialized<
+    'a,
+    M,
+    F: Fn(&mut SerializationContext<'a, M>, StableValue) -> StableValue,
+>(
+    context: &mut SerializationContext<'a, M>,
+    translate: &F,
+) {
+    if context.serialization.pending_array_scanning() {
+        StableArray::resume_scanning(context, translate);
+        return;
+    }
+    let tag = context.serialization.to_space().read::<StableTag>();
+    match tag.decode() {
+        StableObjectKind::ArrayImmutable
+        | StableObjectKind::ArrayMutable
+        | StableObjectKind::ArrayTuple
+        | StableObjectKind::ArraySharedFunction => StableArray::scan_serialized(context, translate),
+        StableObjectKind::MutBox => StableMutBox::scan_serialized(context, translate),
+        StableObjectKind::Object => StableObject::scan_serialized(context, translate),
+        StableObjectKind::BlobBytes
+        | StableObjectKind::BlobText
+        | StableObjectKind::BlobPrincipal
+        | StableObjectKind::BlobActor => StableBlob::scan_serialized(context, translate),
+        StableObjectKind::Bits64Unsigned
+        | StableObjectKind::Bits64Signed
+        | StableObjectKind::Bits64Float => StableBits64::scan_serialized(context, translate),
+        StableObjectKind::Region => StableRegion::scan_serialized(context, translate),
+        StableObjectKind::Variant => StableVariant::scan_serialized(context, translate),
+        StableObjectKind::Concat => StableConcat::scan_serialized(context, translate),
+        StableObjectKind::BigInt => StableBigInt::scan_serialized(context, translate),
+        StableObjectKind::Some => StableSome::scan_serialized(context, translate),
+    }
+}
+
+pub unsafe fn serialize(stable_memory: &mut StableMemoryStream, main_object: Value) {
+    match StableObjectKind::deserialize(main_object.tag()) {
+        StableObjectKind::ArrayImmutable
+        | StableObjectKind::ArrayMutable
+        | StableObjectKind::ArrayTuple
+        | StableObjectKind::ArraySharedFunction => {
+            StableArray::serialize(stable_memory, main_object)
+        }
+        StableObjectKind::MutBox => StableMutBox::serialize(stable_memory, main_object),
+        StableObjectKind::Object => StableObject::serialize(stable_memory, main_object),
+        StableObjectKind::BlobBytes
+        | StableObjectKind::BlobText
+        | StableObjectKind::BlobPrincipal
+        | StableObjectKind::BlobActor => StableBlob::serialize(stable_memory, main_object),
+        StableObjectKind::Bits64Unsigned
+        | StableObjectKind::Bits64Signed
+        | StableObjectKind::Bits64Float => StableBits64::serialize(stable_memory, main_object),
+        StableObjectKind::Region => StableRegion::serialize(stable_memory, main_object),
+        StableObjectKind::Variant => StableVariant::serialize(stable_memory, main_object),
+        StableObjectKind::Concat => StableConcat::serialize(stable_memory, main_object),
+        StableObjectKind::BigInt => StableBigInt::serialize(stable_memory, main_object),
+        StableObjectKind::Some => StableSome::serialize(stable_memory, main_object),
+    }
+}
+
+pub unsafe fn deserialize<M: Memory>(
+    main_memory: &mut M,
+    stable_memory: &mut StableMemoryAccess,
+    stable_object: StableValue,
+) -> Value {
+    let tag = stable_memory.read::<StableTag>(stable_object.to_stable_address());
+    let object_kind = tag.decode();
+    match object_kind {
+        StableObjectKind::ArrayImmutable
+        | StableObjectKind::ArrayMutable
+        | StableObjectKind::ArrayTuple
+        | StableObjectKind::ArraySharedFunction => {
+            StableArray::deserialize(main_memory, stable_memory, stable_object, object_kind)
+        }
+        StableObjectKind::MutBox => {
+            StableMutBox::deserialize(main_memory, stable_memory, stable_object, object_kind)
+        }
+        StableObjectKind::Object => {
+            StableObject::deserialize(main_memory, stable_memory, stable_object, object_kind)
+        }
+        StableObjectKind::BlobBytes
+        | StableObjectKind::BlobText
+        | StableObjectKind::BlobPrincipal
+        | StableObjectKind::BlobActor => {
+            StableBlob::deserialize(main_memory, stable_memory, stable_object, object_kind)
+        }
+        StableObjectKind::Bits64Unsigned
+        | StableObjectKind::Bits64Signed
+        | StableObjectKind::Bits64Float => {
+            StableBits64::deserialize(main_memory, stable_memory, stable_object, object_kind)
+        }
+        StableObjectKind::Region => {
+            StableRegion::deserialize(main_memory, stable_memory, stable_object, object_kind)
+        }
+        StableObjectKind::Variant => {
+            StableVariant::deserialize(main_memory, stable_memory, stable_object, object_kind)
+        }
+        StableObjectKind::Concat => {
+            StableConcat::deserialize(main_memory, stable_memory, stable_object, object_kind)
+        }
+        StableObjectKind::BigInt => {
+            StableBigInt::deserialize(main_memory, stable_memory, stable_object, object_kind)
+        }
+        StableObjectKind::Some => {
+            StableSome::deserialize(main_memory, stable_memory, stable_object, object_kind)
+        }
+    }
+}
diff --git a/rts/motoko-rts/src/stabilization/layout/stable_array.rs b/rts/motoko-rts/src/stabilization/layout/stable_array.rs
new file mode 100644
index 00000000000..7059e55ca38
--- /dev/null
+++ b/rts/motoko-rts/src/stabilization/layout/stable_array.rs
@@ -0,0 +1,149 @@
+use crate::{
+    memory::{alloc_array, Memory},
+    stabilization::{
+        deserialization::stable_memory_access::StableMemoryAccess,
+        graph_copy::GraphCopy,
+        serialization::{
+            stable_memory_stream::{ScanStream, StableMemoryStream, WriteStream},
+            ArraySlice, SerializationContext,
+        },
+    },
+    types::{size_of, Array, Tag, Value, TAG_ARRAY_I, TAG_ARRAY_M, TAG_ARRAY_S, TAG_ARRAY_T},
+};
+
+use super::{Serializer, StableObjectKind, StableToSpace, StableValue, StaticScanner};
+
+#[repr(C)]
+pub struct StableArray {
+    array_length: u64,
+    // Dynamically sized body with `array_length` elements, each of `StableValue`.
+}
+
+impl StaticScanner<StableValue> for StableArray {}
+
+impl Serializer<Array> for StableArray {
+    unsafe fn serialize_static_part(
+        _stable_memory: &mut StableMemoryStream,
+        array: *mut Array,
+    ) -> Self {
+        StableArray {
+            array_length: array.len() as u64,
+        }
+    }
+
+    unsafe fn serialize_dynamic_part(
+        stable_memory: &mut StableMemoryStream,
+        main_array: *mut Array,
+    ) {
+        for index in 0..main_array.len() {
+            let main_element = main_array.get(index);
+            let stable_element = StableValue::serialize(main_element);
+            stable_memory.write(&stable_element);
+        }
+    }
+
+    fn scan_serialized_dynamic<
+        'a,
+        M,
+        F: Fn(&mut SerializationContext<'a, M>, StableValue) -> StableValue,
+    >(
+        &self,
+        context: &mut SerializationContext<'a, M>,
+        translate: &F,
+    ) {
+        debug_assert!(!context.serialization.pending_array_scanning());
+        let mut index = 0;
+        self.sliced_array_scan(context, translate, &mut index);
+        if index < self.array_length {
+            let slice = ArraySlice::new(index, self.array_length);
+            context.serialization.set_array_slice(slice);
+        }
+    }
+
+    unsafe fn allocate_deserialized<M: Memory>(
+        &self,
+        main_memory: &mut M,
+        object_kind: StableObjectKind,
+    ) -> Value {
+        let array_tag = decode_array_tag(object_kind);
+        let array_length = self.array_length as usize;
+        alloc_array(main_memory, array_tag, array_length)
+    }
+
+    unsafe fn deserialize_static_part(
+        &self,
+        target_array: *mut Array,
+        object_kind: StableObjectKind,
+    ) {
+        debug_assert_eq!((*target_array).header.tag, decode_array_tag(object_kind));
+        debug_assert_eq!((*target_array).len as u64, self.array_length);
+    }
+
+    unsafe fn deserialize_dynamic_part<M: Memory>(
+        &self,
+        _main_memory: &mut M,
+        stable_memory: &StableMemoryAccess,
+        stable_object: StableValue,
+        target_array: *mut Array,
+    ) {
+        let stable_address = stable_object.payload_address();
+        let mut element_address =
+            stable_address + size_of::<StableArray>().to_bytes().as_usize() as u64;
+        for index in 0..(*target_array).len {
+            let element = stable_memory.read::<StableValue>(element_address);
+            target_array.set_raw(index, element.deserialize());
+            element_address += size_of::<StableValue>().to_bytes().as_usize() as u64;
+        }
+    }
+}
+
+fn decode_array_tag(object_kind: StableObjectKind) -> Tag {
+    match object_kind {
+        StableObjectKind::ArrayImmutable => TAG_ARRAY_I,
+        StableObjectKind::ArrayMutable => TAG_ARRAY_M,
+        StableObjectKind::ArrayTuple => TAG_ARRAY_T,
+        StableObjectKind::ArraySharedFunction => TAG_ARRAY_S,
+        _ => unreachable!("invalid stable array tag"),
+    }
+}
+
+impl StableArray {
+    pub fn resume_scanning<
+        'a,
+        M,
+        F: Fn(&mut SerializationContext<'a, M>, StableValue) -> StableValue,
+    >(
+        context: &mut SerializationContext<'a, M>,
+        translate: &F,
+    ) {
+        let mut slice = context.serialization.get_array_slice();
+        let stable_array = StableArray {
+            array_length: slice.array_length,
+        };
+        stable_array.sliced_array_scan(context, translate, &mut slice.next_index);
+        if slice.next_index < slice.array_length {
+            context.serialization.set_array_slice(slice);
+        }
+    }
+
+    fn sliced_array_scan<
+        'a,
+        M,
+        F: Fn(&mut SerializationContext<'a, M>, StableValue) -> StableValue,
+    >(
+        &self,
+        context: &mut SerializationContext<'a, M>,
+        translate: &F,
+        index: &mut u64,
+    ) {
+        while *index < self.array_length {
+            let old_value = context.serialization.to_space().read::<StableValue>();
+            let new_value = translate(context, old_value);
+            context.serialization.to_space().update(&new_value);
+            *index += 1;
+            if context.serialization.time_over() {
+                return;
+            }
+        }
+    }
+}
diff --git a/rts/motoko-rts/src/stabilization/layout/stable_bigint.rs b/rts/motoko-rts/src/stabilization/layout/stable_bigint.rs
new file mode 100644
index 00000000000..107508e9028
--- /dev/null
+++ b/rts/motoko-rts/src/stabilization/layout/stable_bigint.rs
@@ -0,0 +1,219 @@
+use crate::bigint::mp_calloc;
+use crate::memory::Memory;
+use crate::stabilization::deserialization::stable_memory_access::StableMemoryAccess;
+use crate::stabilization::serialization::stable_memory_stream::{
+    ScanStream, StableMemoryStream, WriteStream,
+};
+use crate::stabilization::serialization::SerializationContext;
+use crate::tommath_bindings::mp_digit;
+use crate::types::{size_of, BigInt, Bytes, Value, TAG_BIGINT};
+
+use super::{
+    round_to_u64, Serializer, StableObjectKind, StableToSpace, StableValue, StaticScanner,
+};
+
+// Tom's math library, as configured for Motoko RTS with 64-bit enhanced orthogonal persistence,
+// encodes a big numbers as an array of 64-bit elements, where each element stores 60 bits of the
+// number while its highest 4 bits are zero.
+// Similar to the little endian encoding, the element array starts with the least significant 60 bits,
+// with each subsequent element covering the next higher 60 bits.
+
+// Tom's math library functions `mp_to_sbin` and `mp_from_sbin` imply temporary object allocations
+// which could be problematic when memory is short.
+// Therefore, a custom serialization to the defined portable stable bignum format is implemented.
+
+#[repr(C)]
+pub struct StableBigInt {
+    is_negative: bool, // Sign bit.
+    number_of_bits: Bits, // Number of used bits in the payload.
+
+                       // Dynamically sized payload of the big integer number as
+                       // little endian encoded series of 60 bits packed in 64-bit elements.
+}
+
+type ElementType = u64;
+const USED_BITS_PER_ELEMENT: u32 = 60;
+const ELEMENT_SIZE: usize = core::mem::size_of::<ElementType>();
+
+// Assumes 64-bit representation for both Tom's math library and main memory.
+const _: () = assert!(usize::BITS == ElementType::BITS);
+const _: () = assert!(core::mem::size_of::<ElementType>() == core::mem::size_of::<mp_digit>());
+
+#[repr(C)]
+struct Bits(u64);
+
+impl Bits {
+    fn to_bytes(&self) -> Bytes {
+        let bytes = ceiling_div(self.0, u8::BITS as u64);
+        Bytes(bytes as usize)
+    }
+}
+
+impl StableBigInt {
+    fn is_negative(main_object: *mut BigInt) -> bool {
+        unsafe { (*main_object).mp_int.sign != 0 }
+    }
+
+    fn serialized_length(main_object: *mut BigInt) -> Bits {
+        let used_elements = unsafe { (*main_object).mp_int.used } as usize;
+        if used_elements == 0 {
+            return Bits(0);
+        }
+        let last_element = unsafe { *main_object.payload_addr().add(used_elements - 1) };
+        debug_assert_ne!(last_element, 0);
+        debug_assert_eq!(last_element >> USED_BITS_PER_ELEMENT, 0);
+        let last_bits = ElementType::BITS - last_element.leading_zeros();
+        Bits((used_elements - 1) as u64 * USED_BITS_PER_ELEMENT as u64 + last_bits as u64)
+    }
+
+    fn deserialized_elements(&self) -> usize {
+        ceiling_div(self.number_of_bits.0, USED_BITS_PER_ELEMENT as u64) as usize
+    }
+}
+
+fn ceiling_div(dividend: u64, divisor: u64) -> u64 {
+    (dividend + divisor - 1) / divisor
+}
+
+impl StaticScanner<StableValue> for StableBigInt {}
+
+impl Serializer<BigInt> for StableBigInt {
+    unsafe fn serialize_static_part(
+        _stable_memory: &mut StableMemoryStream,
+        main_object: *mut BigInt,
+    ) -> Self {
+        let is_negative = Self::is_negative(main_object);
+        let number_of_bits = Self::serialized_length(main_object);
+        StableBigInt {
+            is_negative,
+            number_of_bits,
+        }
+    }
+
+    unsafe fn serialize_dynamic_part(
+        stable_memory: &mut StableMemoryStream,
+        main_object: *mut BigInt,
+    ) {
+        let total_elements = (*main_object).mp_int.used as usize;
+        let total_bytes = Self::serialized_length(main_object).to_bytes().as_usize();
+        let payload = main_object.payload_addr();
+        let mut written_bytes = 0;
+        let mut element_index = 0;
+        let mut element: ElementType = 0;
+        let mut pending_bits = 0;
+        let mut next_element: ElementType = 0;
+        let mut next_pending_bits = 0;
+
+        while written_bytes < total_bytes {
+            while pending_bits < ElementType::BITS && element_index < total_elements {
+                debug_assert_eq!(next_pending_bits, 0);
+                next_element = *payload.add(element_index);
+                element_index += 1;
+                debug_assert_eq!(next_element >> USED_BITS_PER_ELEMENT, 0);
+                element |= next_element << pending_bits;
+                let consumed_bits =
+                    core::cmp::min(USED_BITS_PER_ELEMENT, ElementType::BITS - pending_bits);
+                pending_bits += consumed_bits;
+                next_element >>= consumed_bits;
+                next_pending_bits = USED_BITS_PER_ELEMENT - consumed_bits;
+            }
+            stable_memory.write(&element); // little endian
+            written_bytes += ELEMENT_SIZE;
+            element = next_element;
+            pending_bits = next_pending_bits;
+            next_pending_bits = 0;
+        }
+        debug_assert!(pending_bits == 0 || element == 0);
+        debug_assert_eq!(element_index, total_elements);
+        debug_assert_eq!(written_bytes % core::mem::size_of::<u64>(), 0);
+    }
+
+    fn scan_serialized_dynamic<
+        'a,
+        M,
+        F: Fn(&mut SerializationContext<'a, M>, StableValue) -> StableValue,
+    >(
+        &self,
+        context: &mut SerializationContext<'a, M>,
+        _translate: &F,
+    ) {
+        let rounded_length = round_to_u64(self.number_of_bits.to_bytes().as_usize() as u64);
+        context
+            .serialization
+            .to_space()
+            .skip(rounded_length as usize);
+    }
+
+    unsafe fn allocate_deserialized<M: Memory>(
+        &self,
+        main_memory: &mut M,
+        object_kind: StableObjectKind,
+    ) -> Value {
+        debug_assert_eq!(object_kind, StableObjectKind::BigInt);
+        let elements = self.deserialized_elements();
+        let payload = mp_calloc(main_memory, elements, Bytes(ELEMENT_SIZE)) as *mut mp_digit;
+        let bigint = BigInt::from_payload(payload);
+        Value::from_ptr(bigint as usize)
+    }
+
+    unsafe fn deserialize_static_part(
+        &self,
+        target_bigint: *mut BigInt,
+        object_kind: StableObjectKind,
+    ) {
+        debug_assert_eq!(object_kind, StableObjectKind::BigInt);
+        debug_assert_eq!((*target_bigint).header.tag, TAG_BIGINT);
+        let elements = self.deserialized_elements();
+        debug_assert_eq!((*target_bigint).mp_int.alloc as usize, elements);
+        (*target_bigint).mp_int.sign = if self.is_negative { 1 } else { 0 };
+        (*target_bigint).mp_int.used = elements as i32;
+    }
+
+    unsafe fn deserialize_dynamic_part<M: Memory>(
+        &self,
+        _main_memory: &mut M,
+        stable_memory: &StableMemoryAccess,
+        stable_object: StableValue,
+        target_bigint: *mut BigInt,
+    ) {
+        let stable_address = stable_object.payload_address();
+        let source_payload =
+            stable_address + size_of::<StableBigInt>().to_bytes().as_usize() as u64;
+        let target_payload = target_bigint.payload_addr();
+        let total_bytes = self.number_of_bits.to_bytes().as_usize();
+        let total_elements = self.deserialized_elements();
+
+        let mut read_offset = 0;
+        let mut element_index = 0;
+        let mut element: ElementType = 0;
+        let mut pending_bits = 0;
+        let mut next_byte = 0;
+        let mut next_pending_bits = 0;
+
+        while element_index < total_elements {
+            while pending_bits < USED_BITS_PER_ELEMENT && read_offset < total_bytes {
+                debug_assert_eq!(next_pending_bits, 0);
+                next_byte = stable_memory.read::<u8>(source_payload + read_offset as u64);
+                read_offset += 1;
+                element |= (next_byte as ElementType) << pending_bits;
+                element &= (1 << USED_BITS_PER_ELEMENT) - 1;
+                let consumed_bits = core::cmp::min(u8::BITS, USED_BITS_PER_ELEMENT - pending_bits);
+                pending_bits += consumed_bits;
+                next_byte = if consumed_bits < u8::BITS {
+                    next_byte >> consumed_bits
+                } else {
+                    0
+                };
+                next_pending_bits = u8::BITS - consumed_bits;
+            }
+            *target_payload.add(element_index) = element; // little endian
+            element_index += 1;
+            element = next_byte as ElementType;
+            pending_bits = next_pending_bits;
+            next_pending_bits = 0;
+        }
+
+        debug_assert!(pending_bits == 0 || element == 0);
+        debug_assert_eq!(element_index, self.deserialized_elements());
+    }
+}
diff --git a/rts/motoko-rts/src/stabilization/layout/stable_bits64.rs b/rts/motoko-rts/src/stabilization/layout/stable_bits64.rs
new file mode 100644
index 00000000000..8f5ee03be70
--- /dev/null
+++ b/rts/motoko-rts/src/stabilization/layout/stable_bits64.rs
@@ -0,0 +1,45 @@
+use crate::{
+    stabilization::serialization::stable_memory_stream::StableMemoryStream,
+    types::{Bits64, Tag, Value, TAG_BITS64_F, TAG_BITS64_S, TAG_BITS64_U},
+};
+
+use super::{Serializer, StableObjectKind, StableValue, StaticScanner};
+
+#[repr(C)]
+pub struct StableBits64 {
+    bits: u64,
+}
+
+impl StaticScanner<StableValue> for StableBits64 {}
+
+impl Serializer<Bits64> for StableBits64 {
+    unsafe fn serialize_static_part(
+        _stable_memory: &mut StableMemoryStream,
+        main_object: *mut Bits64,
+    ) -> Self {
+        StableBits64 {
+            bits: (*main_object).bits as u64,
+        }
+    }
+
+    unsafe fn deserialize_static_part(
+        &self,
+        target_bits64: *mut Bits64,
+        object_kind: StableObjectKind,
+    ) {
+        (*target_bits64).header.tag = decode_bits64_tag(object_kind);
+        (*target_bits64)
+            .header
+            .init_forward(Value::from_ptr(target_bits64 as usize));
+        (*target_bits64).bits = self.bits;
+    }
+}
+
+fn decode_bits64_tag(object_kind: StableObjectKind) -> Tag {
+    match object_kind {
+        StableObjectKind::Bits64Unsigned => TAG_BITS64_U,
+        StableObjectKind::Bits64Signed => TAG_BITS64_S,
+        StableObjectKind::Bits64Float => TAG_BITS64_F,
+        _ => unreachable!("invalid stable boxed word 64 tag"),
+    }
+}
diff --git a/rts/motoko-rts/src/stabilization/layout/stable_blob.rs b/rts/motoko-rts/src/stabilization/layout/stable_blob.rs
new file mode 100644
index 00000000000..97620328e9e
--- /dev/null
+++ b/rts/motoko-rts/src/stabilization/layout/stable_blob.rs
@@ -0,0 +1,112 @@
+use crate::{
+    memory::{alloc_blob, Memory},
+    stabilization::{
+        deserialization::stable_memory_access::StableMemoryAccess,
+        serialization::{
+            stable_memory_stream::{ScanStream, StableMemoryStream, WriteStream},
+            SerializationContext,
+        },
+    },
+    types::{size_of, Blob, Bytes, Tag, Value, TAG_BLOB_A, TAG_BLOB_B, TAG_BLOB_P, TAG_BLOB_T},
+};
+
+use super::{
+    round_to_u64, write_padding_u64, Serializer, StableObjectKind, StableToSpace, StableValue,
+    StaticScanner,
+};
+
+#[repr(C)]
+#[derive(Default)]
+pub struct StableBlob {
+    byte_length: u64,
+    // Dynamically sized body with `byte_length` bytes. No pointers to be scanned.
+    // Zero padding to align to next `u64`.
+    // Note: The rounding of object sizes to at least 2 bytes is necessary for the skewed pointer representation.
+}
+
+impl StableBlob {
+    pub fn byte_length(&self) -> u64 {
+        self.byte_length
+    }
+}
+
+impl StaticScanner<StableValue> for StableBlob {}
+
+impl Serializer<Blob> for StableBlob {
+    unsafe fn serialize_static_part(
+        _stable_memory: &mut StableMemoryStream,
+        main_object: *mut Blob,
+    ) -> Self {
+        StableBlob {
+            byte_length: main_object.len().as_usize() as u64,
+        }
+    }
+
+    unsafe fn serialize_dynamic_part(
+        stable_memory: &mut StableMemoryStream,
+        main_object: *mut Blob,
+    ) {
+        let byte_length = main_object.len().as_usize();
+        stable_memory.raw_write(main_object.payload_addr() as usize, byte_length);
+        write_padding_u64(stable_memory, byte_length);
+    }
+
+    fn scan_serialized_dynamic<
+        'a,
+        M,
+        F: Fn(&mut SerializationContext<'a, M>, StableValue) -> StableValue,
+    >(
+        &self,
+        context: &mut SerializationContext<'a, M>,
+        _translate: &F,
+    ) {
+        let rounded_length = round_to_u64(self.byte_length);
+        context
+            .serialization
+            .to_space()
+            .skip(rounded_length as usize);
+    }
+
+    unsafe fn allocate_deserialized<M: Memory>(
+        &self,
+        main_memory: &mut M,
+        object_kind: StableObjectKind,
+    ) -> Value {
+        let blob_tag = decode_blob_tag(object_kind);
+        let blob_length = self.byte_length as usize;
+        alloc_blob(main_memory, blob_tag, Bytes(blob_length))
+    }
+
+    unsafe fn deserialize_static_part(
+        &self,
+        target_blob: *mut Blob,
+        object_kind: StableObjectKind,
+    ) {
+        debug_assert_eq!((*target_blob).header.tag, decode_blob_tag(object_kind));
+        debug_assert_eq!((*target_blob).len.as_usize(), self.byte_length as usize);
+    }
+
+    unsafe fn deserialize_dynamic_part<M: Memory>(
+        &self,
+        _main_memory: &mut M,
+        stable_memory: &StableMemoryAccess,
+        stable_object: StableValue,
+        target_blob: *mut Blob,
+    ) {
+        let stable_address = stable_object.payload_address();
+        let source_payload = stable_address + size_of::<StableBlob>().to_bytes().as_usize() as u64;
+        let target_payload = target_blob.payload_addr() as usize;
+        let blob_length = self.byte_length as usize;
+        stable_memory.raw_read(source_payload, target_payload, blob_length);
+    }
+}
+
+fn decode_blob_tag(object_kind: StableObjectKind) -> Tag {
+    match object_kind {
+        StableObjectKind::BlobBytes => TAG_BLOB_B,
+        StableObjectKind::BlobText => TAG_BLOB_T,
+        StableObjectKind::BlobPrincipal => TAG_BLOB_P,
+        StableObjectKind::BlobActor => TAG_BLOB_A,
+        _ => unreachable!("invalid stable blob tag"),
+    }
+}
diff --git a/rts/motoko-rts/src/stabilization/layout/stable_concat.rs b/rts/motoko-rts/src/stabilization/layout/stable_concat.rs
new file mode 100644
index 00000000000..1f31c02c209
--- /dev/null
+++ b/rts/motoko-rts/src/stabilization/layout/stable_concat.rs
@@ -0,0 +1,54 @@
+use crate::{
+    stabilization::serialization::stable_memory_stream::StableMemoryStream,
+    types::{Bytes, Concat, Value, TAG_CONCAT},
+};
+
+use super::{Serializer, StableObjectKind, StableValue, StaticScanner};
+
+#[repr(C)]
+pub struct StableConcat {
+    number_of_bytes: u64,
+    text1: StableValue,
+    text2: StableValue,
+}
+
+impl StaticScanner<StableValue> for StableConcat {
+    fn update_pointers<C, F: Fn(&mut C, StableValue) -> StableValue>(
+        &mut self,
+        context: &mut C,
+        translate: &F,
+    ) -> bool {
+        self.text1 = translate(context, self.text1);
+        self.text2 = translate(context, self.text2);
+        true
+    }
+}
+
+impl Serializer<Concat> for StableConcat {
+    unsafe fn serialize_static_part(
+        _stable_memory: &mut StableMemoryStream,
+        main_object: *mut Concat,
+    ) -> Self {
+        StableConcat {
+            number_of_bytes: (*main_object).n_bytes.as_usize() as u64,
+            text1: StableValue::serialize(main_object.text1()),
+            text2: StableValue::serialize(main_object.text2()),
+        }
+    }
+
+    unsafe fn deserialize_static_part(
+        &self,
+        target_concat: *mut Concat,
+        object_kind: StableObjectKind,
+    ) {
+        debug_assert_eq!(object_kind, StableObjectKind::Concat);
+        let n_bytes = Bytes(self.number_of_bytes as usize);
+        (*target_concat).header.tag = TAG_CONCAT;
+        (*target_concat)
+            .header
+            .init_forward(Value::from_ptr(target_concat as usize));
+        (*target_concat).n_bytes = n_bytes;
+        (*target_concat).text1 = self.text1.deserialize();
+        (*target_concat).text2 = self.text2.deserialize();
+    }
+}
diff --git a/rts/motoko-rts/src/stabilization/layout/stable_mutbox.rs b/rts/motoko-rts/src/stabilization/layout/stable_mutbox.rs
new file mode 100644
index 00000000000..e9b1f8f4160
--- /dev/null
+++ b/rts/motoko-rts/src/stabilization/layout/stable_mutbox.rs
@@ -0,0 +1,46 @@
+use crate::{
+    stabilization::serialization::stable_memory_stream::StableMemoryStream,
+    types::{MutBox, Value, TAG_MUTBOX},
+};
+
+use super::{Serializer, StableObjectKind, StableValue, StaticScanner};
+
+#[repr(C)]
+pub struct StableMutBox {
+    field: StableValue,
+}
+
+impl StaticScanner<StableValue> for StableMutBox {
+    fn update_pointers<C, F: Fn(&mut C, StableValue) -> StableValue>(
+        &mut self,
+        context: &mut C,
+        translate: &F,
+    ) -> bool {
+        self.field = translate(context, self.field);
+        true
+    }
+}
+
+impl Serializer<MutBox> for StableMutBox {
+    unsafe fn serialize_static_part(
+        _stable_memory: &mut StableMemoryStream,
+        main_object: *mut MutBox,
+    ) -> Self {
+        StableMutBox {
+            field: StableValue::serialize((*main_object).field),
+        }
+    }
+
+    unsafe fn deserialize_static_part(
+        &self,
+        target_mutbox: *mut MutBox,
+        object_kind: StableObjectKind,
+    ) {
+        debug_assert_eq!(object_kind, StableObjectKind::MutBox);
+        (*target_mutbox).header.tag = TAG_MUTBOX;
+        (*target_mutbox)
+            .header
+            .init_forward(Value::from_ptr(target_mutbox as usize));
+        (*target_mutbox).field = self.field.deserialize();
+    }
+}
diff --git a/rts/motoko-rts/src/stabilization/layout/stable_object.rs b/rts/motoko-rts/src/stabilization/layout/stable_object.rs
new file mode 100644
index 00000000000..94ec0c41910
--- /dev/null
+++ b/rts/motoko-rts/src/stabilization/layout/stable_object.rs
@@ -0,0 +1,145 @@
+use crate::{
+    memory::Memory,
+    stabilization::{
+        deserialization::stable_memory_access::StableMemoryAccess,
+        layout::{stable_blob::StableBlob, StableObjectKind},
+        serialization::{
+            stable_memory_stream::{ScanStream, StableMemoryStream, WriteStream},
+            SerializationContext,
+        },
+    },
+    types::{size_of, FwdPtr, Object, Tag, Value, Words, TAG_FWD_PTR, TAG_OBJECT},
+};
+
+use super::{Serializer, StableTag, StableToSpace, StableValue, StaticScanner};
+
+#[repr(C)]
+pub struct StableObject {
+    size: u64, // Number of fields.
+    hash_blob: StableValue, // Pointer to a blob containing the `u32` hashes of the field labels.
+               // Dynamically sized body with `size` fields, each of `StableValue`, ordered according to the hashes in the blob.
+}
+
+impl StaticScanner<StableValue> for StableObject {
+    fn update_pointers<C, F: Fn(&mut C, StableValue) -> StableValue>(
+        &mut self,
+        context: &mut C,
+        translate: &F,
+    ) -> bool {
+        self.hash_blob = translate(context, self.hash_blob);
+        true
+    }
+}
+
+impl Serializer<Object> for StableObject {
+    unsafe fn serialize_static_part(
+        stable_memory: &mut StableMemoryStream,
+        main_object: *mut Object,
+    ) -> Self {
+        StableObject {
+            size: get_object_size(stable_memory, main_object) as u64,
+            hash_blob: StableValue::serialize((*main_object).hash_blob),
+        }
+    }
+
+    unsafe fn serialize_dynamic_part(
+        stable_memory: &mut StableMemoryStream,
+        main_object: *mut Object,
+    ) {
+        let object_size = get_object_size(stable_memory, main_object);
+        for index in 0..object_size {
+            let main_field = main_object.get(index);
+            let stable_field = StableValue::serialize(main_field);
+            stable_memory.write(&stable_field);
+        }
+    }
+
+    fn scan_serialized_dynamic<
+        'a,
+        M,
+        F: Fn(&mut SerializationContext<'a, M>, StableValue) -> StableValue,
+    >(
+        &self,
+        context: &mut SerializationContext<'a, M>,
+        translate: &F,
+    ) {
+        for _ in 0..self.size {
+            let old_value = context.serialization.to_space().read::<StableValue>();
+            // On a longer term, the GC could remove unnecessary fields (during evacuation) that have been
+            // declared in old program versions but which name does no longer exist in a new program version.
+            let new_value = translate(context, old_value);
+            context.serialization.to_space().update(&new_value);
+        }
+    }
+
+    unsafe fn allocate_deserialized<M: Memory>(
+        &self,
+        main_memory: &mut M,
+        object_kind: StableObjectKind,
+    ) -> Value {
+        debug_assert_eq!(object_kind, StableObjectKind::Object);
+        let total_size = size_of::<Object>() + Words(self.size as usize);
+        main_memory.alloc_words(total_size)
+    }
+
+    unsafe fn deserialize_static_part(
+        &self,
+        target_object: *mut Object,
+        object_kind: StableObjectKind,
+    ) {
+        debug_assert_eq!(object_kind, StableObjectKind::Object);
+        (*target_object).header.tag = TAG_OBJECT;
+        (*target_object)
+            .header
+            .init_forward(Value::from_ptr(target_object as usize));
+        (*target_object).hash_blob = self.hash_blob.deserialize();
+    }
+
+    unsafe fn deserialize_dynamic_part<M: Memory>(
+        &self,
+        _main_memory: &mut M,
+        stable_memory: &StableMemoryAccess,
+        stable_object: StableValue,
+        target_object: *mut Object,
+    ) {
+        let stable_address = stable_object.payload_address();
+        for index in 0..self.size {
+            let field_address = stable_address
+                + size_of::<StableObject>().to_bytes().as_usize() as u64
+                + (index * size_of::<StableValue>().to_bytes().as_usize() as u64);
+            let field = stable_memory.read::<StableValue>(field_address);
+            let target_field_address = target_object.payload_addr().add(index as usize);
+            *target_field_address = field.deserialize();
+        }
+    }
+}
+
+#[repr(C)]
+struct HashBlob {
+    tag: StableTag,
+    header: StableBlob,
+}
+
+/// Resolve object size during serialization.
+/// This requires a look up in the hash blob, which may however already have been
+/// serialized to stable memory.
+fn get_object_size(stable_memory: &StableMemoryStream, main_object: *mut Object) -> usize {
+    // Do not call tag as it resolves the forwarding pointer.
+    unsafe {
+        let main_hash_blob = (*main_object).hash_blob;
+        let main_tag = *(main_hash_blob.get_ptr() as *const Tag);
+        if main_tag == TAG_FWD_PTR {
+            // The Hash blob has already been moved to stable memory.
+            let target_location = (*(main_hash_blob.get_ptr() as *mut FwdPtr)).fwd;
+            let stable_offset = target_location.get_ptr() as u64;
+            let stable_hash_blob = stable_memory.read_preceding::<HashBlob>(stable_offset);
+            assert!(stable_hash_blob.tag.decode() == StableObjectKind::BlobBytes);
+            let hash_blob_length = stable_hash_blob.header.byte_length() as usize;
+            let hash_entry_length = size_of::<u64>().to_bytes().as_usize();
+            debug_assert_eq!(hash_blob_length % hash_entry_length, 0);
+            hash_blob_length / hash_entry_length
+        } else {
+            main_object.size()
+        }
+    }
+}
diff --git a/rts/motoko-rts/src/stabilization/layout/stable_region.rs b/rts/motoko-rts/src/stabilization/layout/stable_region.rs
new file mode 100644
index 00000000000..661dae5b2ca
--- /dev/null
+++ b/rts/motoko-rts/src/stabilization/layout/stable_region.rs
@@ -0,0 +1,52 @@
+use crate::{
+    stabilization::serialization::stable_memory_stream::StableMemoryStream,
+    types::{Region, Value, TAG_REGION},
+};
+
+use super::{Serializer, StableObjectKind, StableValue, StaticScanner};
+
+#[repr(C)]
+pub struct StableRegion {
+    id: u64,
+    page_count: u64,
+    vec_pages: StableValue, // Blob of `u16`, each denoting a page ID.
+}
+
+impl StaticScanner<StableValue> for StableRegion {
+    fn update_pointers<C, F: Fn(&mut C, StableValue) -> StableValue>(
+        &mut self,
+        context: &mut C,
+        translate: &F,
+    ) -> bool {
+        self.vec_pages = translate(context, self.vec_pages);
+        true
+    }
+}
+
+impl Serializer<Region> for StableRegion {
+    unsafe fn serialize_static_part(
+        _stable_memory: &mut StableMemoryStream,
+        main_object: *mut Region,
+    ) -> Self {
+        StableRegion {
+            id: (*main_object).id as u64,
+            page_count: (*main_object).page_count as u64,
+            vec_pages: StableValue::serialize((*main_object).vec_pages),
+        }
+    }
+
+    unsafe fn deserialize_static_part(
+        &self,
+        target_region: *mut Region,
+        object_kind: StableObjectKind,
+    ) {
+        debug_assert_eq!(object_kind, StableObjectKind::Region);
+        (*target_region).header.tag = TAG_REGION;
+        (*target_region)
+            .header
+            .init_forward(Value::from_ptr(target_region as usize));
+        (*target_region).id = self.id;
+        (*target_region).page_count = self.page_count as usize;
+        (*target_region).vec_pages = self.vec_pages.deserialize();
+    }
+}
diff --git a/rts/motoko-rts/src/stabilization/layout/stable_some.rs b/rts/motoko-rts/src/stabilization/layout/stable_some.rs
new file mode 100644
index 00000000000..29b118037f7
--- /dev/null
+++ b/rts/motoko-rts/src/stabilization/layout/stable_some.rs
@@ -0,0 +1,46 @@
+use crate::{
+    stabilization::serialization::stable_memory_stream::StableMemoryStream,
+    types::{Some, Value, TAG_SOME},
+};
+
+use super::{Serializer, StableObjectKind, StableValue, StaticScanner};
+
+#[repr(C)]
+pub struct StableSome {
+    field: StableValue,
+}
+
+impl StaticScanner<StableValue> for StableSome {
+    fn update_pointers<C, F: Fn(&mut C, StableValue) -> StableValue>(
+        &mut self,
+        context: &mut C,
+        translate: &F,
+    ) -> bool {
+        self.field = translate(context, self.field);
+        true
+    }
+}
+
+impl Serializer<Some> for StableSome {
+    unsafe fn serialize_static_part(
+        _stable_memory: &mut StableMemoryStream,
+        main_object: *mut Some,
+    ) -> Self {
+        StableSome {
+            field: StableValue::serialize((*main_object).field),
+        }
+    }
+
+    unsafe fn deserialize_static_part(
+        &self,
+        target_some: *mut Some,
+        object_kind: StableObjectKind,
+    ) {
+        debug_assert_eq!(object_kind, StableObjectKind::Some);
+        (*target_some).header.tag = TAG_SOME;
+        (*target_some)
+            .header
+            .init_forward(Value::from_ptr(target_some as usize));
+        (*target_some).field = self.field.deserialize();
+    }
+}
diff --git a/rts/motoko-rts/src/stabilization/layout/stable_variant.rs b/rts/motoko-rts/src/stabilization/layout/stable_variant.rs
new file mode 100644
index 00000000000..1016d00e75c
--- /dev/null
+++ b/rts/motoko-rts/src/stabilization/layout/stable_variant.rs
@@ -0,0 +1,49 @@
+use crate::{
+    stabilization::serialization::stable_memory_stream::StableMemoryStream,
+    types::{Value, Variant, TAG_VARIANT},
+};
+
+use super::{Serializer, StableObjectKind, StableValue, StaticScanner};
+
+#[repr(C)]
+pub struct StableVariant {
+    tag: u64,
+    field: StableValue,
+}
+
+impl StaticScanner<StableValue> for StableVariant {
+    fn update_pointers<C, F: Fn(&mut C, StableValue) -> StableValue>(
+        &mut self,
+        context: &mut C,
+        translate: &F,
+    ) -> bool {
+        self.field = translate(context, self.field);
+        true
+    }
+}
+
+impl Serializer<Variant> for StableVariant {
+    unsafe fn serialize_static_part(
+        _stable_memory: &mut StableMemoryStream,
+        main_object: *mut Variant,
+    ) -> Self {
+        StableVariant {
+            tag: (*main_object).tag as u64,
+            field: StableValue::serialize((*main_object).field),
+        }
+    }
+
+    unsafe fn deserialize_static_part(
+        &self,
+        target_variant: *mut Variant,
+        object_kind: StableObjectKind,
+    ) {
+        debug_assert_eq!(object_kind, StableObjectKind::Variant);
+        (*target_variant).header.tag = TAG_VARIANT;
+        (*target_variant)
+            .header
+            .init_forward(Value::from_ptr(target_variant as usize));
+        (*target_variant).tag = self.tag as usize;
+        (*target_variant).field = self.field.deserialize();
+    }
+}
diff --git a/rts/motoko-rts/src/stabilization/serialization.rs b/rts/motoko-rts/src/stabilization/serialization.rs
new file mode 100644
index 00000000000..fcf23b75671
--- /dev/null
+++ b/rts/motoko-rts/src/stabilization/serialization.rs
@@ -0,0 +1,202 @@
+pub mod stable_memory_stream;
+
+use crate::{
+    memory::Memory,
+    stabilization::layout::serialize,
+    types::{FwdPtr, Tag, Value, TAG_CLOSURE, TAG_FWD_PTR},
+};
+
+use self::stable_memory_stream::{ScanStream, StableMemoryStream};
+
+use super::{
+    graph_copy::{limit::ExecutionMonitor, GraphCopy},
+    layout::{scan_serialized, StableToSpace, StableValue},
+    DUMMY_VALUE,
+};
+
+pub struct Serialization {
+    to_space: StableMemoryStream,
+    limit: ExecutionMonitor,
+    array_slice: Option,
+}
+
+pub struct ArraySlice {
+    pub next_index: u64,
+    pub array_length: u64,
+}
+
+impl ArraySlice {
+    pub fn new(index: u64, length: u64) -> ArraySlice {
+        ArraySlice {
+            next_index: index,
+            array_length: length,
+        }
+    }
+}
+
+/// Helper type to pass serialization context instead of closures.
+pub struct SerializationContext<'a, M> {
+    pub serialization: &'a mut Serialization,
+    pub mem: &'a mut M,
+}
+
+impl<'a, M> SerializationContext<'a, M> {
+    fn new(serialization: &'a mut Serialization, mem: &'a mut M) -> SerializationContext<'a, M> {
+        SerializationContext { serialization, mem }
+    }
+}
+
+/// Graph-copy-based serialization.
+/// Notes:
+/// - Invalidates the heap by replacing reachable stable objects by forwarding objects:
+/// The heap is finally no longer usable by mutator or GC.
+/// - `copy` and partially also `scan` depends on the heap layout. Adjust these functions
+/// whenever the heap layout is changed.
+/// Usage:
+/// ```
+/// let serialization = Serialization::start(root, stable_start);
+/// while !serialization.is_completed() {
+///     serialization.copy_increment();
+/// }
+/// ```
+impl Serialization {
+    /// Start the graph-copy-based heap serialization from the stable `root` object
+    /// by writing the serialized data to the stable memory at offset `stable_start`.
+    /// The start is followed by a series of copy increments before the serialization is completed.
+    pub fn start(mem: &mut M, root: Value, stable_start: u64) -> Serialization {
+        let to_space = StableMemoryStream::open(stable_start);
+        let limit = ExecutionMonitor::new();
+        let mut serialization = Serialization {
+            limit,
+            to_space,
+            array_slice: None,
+        };
+        serialization.start(mem, root);
+        serialization
+    }
+
+    pub fn serialized_data_start(&self) -> u64 {
+        self.to_space.base_address()
+    }
+
+    pub fn serialized_data_length(&self) -> u64 {
+        self.to_space.written_length()
+    }
+
+    /// Resolve the Brooks forwarding pointer of the incremental GC by considering potential
+    /// forwarding objects (`FwdPtr`) used in Cheney's algorithm for stabilization.
+    unsafe fn resolve_gc_forwarding(object: Value) -> Value {
+        let tag = Self::read_object_tag(object);
+        if tag == TAG_FWD_PTR {
+            object
+        } else {
+            object.forward()
+        }
+    }
+    /// Read the object tag by considering potential forwarding objects (`FwdPtr`).
+    unsafe fn read_object_tag(object: Value) -> Tag {
+        // Do not call `tag()` as it dereferences the Brooks forwarding pointer of the incremental GC,
+        // which does not exist for the forwarding objects (`FwdPtr`) used by the Cheney's algorithm.
+        *(object.get_ptr() as *const Tag)
+    }
+
+    fn has_non_stable_type(old_field: Value) -> bool {
+        unsafe { old_field.tag() == TAG_CLOSURE }
+    }
+
+    pub fn pending_array_scanning(&self) -> bool {
+        self.array_slice.is_some()
+    }
+
+    pub fn get_array_slice(&mut self) -> ArraySlice {
+        self.array_slice.take().unwrap()
+    }
+
+    pub fn set_array_slice(&mut self, slice: ArraySlice) {
+        self.array_slice = Some(slice);
+    }
+
+    fn processed_memory(&self) -> u64 {
+        self.to_space.written_length() + self.to_space.scanned_length()
+    }
+}
+
+impl GraphCopy for Serialization {
+    fn get_forward_address(&self, object: Value) -> Option {
+        unsafe {
+            let object = Self::resolve_gc_forwarding(object);
+            let tag = Self::read_object_tag(object);
+            match tag {
+                TAG_FWD_PTR => {
+                    let new_location = (*(object.get_ptr() as *mut FwdPtr)).fwd;
+                    Some(StableValue::serialize(new_location))
+                }
+                _ => None,
+            }
+        }
+    }
+
+    fn set_forward_address(&mut self, object: Value, target: StableValue) {
+        unsafe {
+            let object = Self::resolve_gc_forwarding(object);
+            debug_assert!(object.is_obj());
+            let fwd = object.get_ptr() as *mut FwdPtr;
+            (*fwd).tag = TAG_FWD_PTR;
+            (*fwd).fwd = target.deserialize();
+        }
+    }
+
+    fn copy(&mut self, _mem: &mut M, object: Value) -> StableValue {
+        unsafe {
+            let object = Self::resolve_gc_forwarding(object);
+            debug_assert!(object.is_obj());
+            let address = self.to_space.written_length();
+            serialize(&mut self.to_space, object);
+            debug_assert!(self.to_space.written_length() >= address);
+            StableValue::from_stable_address(address)
+        }
+    }
+
+    fn scan(&mut self, mem: &mut M) {
+        scan_serialized(
+            &mut SerializationContext::new(self, mem),
+            &|context, original| {
+                let old_value = original.deserialize();
+                if old_value.is_non_null_ptr() {
+                    if Self::has_non_stable_type(old_value) {
+                        // Due to structural subtyping or `Any`-subtyping, a non-stable object (such as a closure) may
+                        // be dynamically reachable from a stable variable. The value is not accessible in the new program version.
+                        // Therefore, the content of these fields can be serialized with a dummy value that is also ignored by the GC.
+                        DUMMY_VALUE
+                    } else {
+                        context.serialization.evacuate(context.mem, old_value)
+                    }
+                } else {
+                    original
+                }
+            },
+        );
+    }
+
+    fn scanning_completed(&self) -> bool {
+        self.to_space.scan_completed()
+    }
+
+    fn complete(&mut self) {
+        self.to_space.close();
+    }
+
+    fn time_over(&mut self) -> bool {
+        self.limit.is_exceeded(self.processed_memory())
+    }
+
+    fn reset_time(&mut self) {
+        self.limit.reset(self.processed_memory());
+    }
+}
+
+impl StableToSpace for Serialization {
+    fn to_space(&mut self) -> &mut StableMemoryStream {
+        &mut self.to_space
+    }
+}
diff --git a/rts/motoko-rts/src/stabilization/serialization/stable_memory_stream.rs b/rts/motoko-rts/src/stabilization/serialization/stable_memory_stream.rs
new file mode 100644
index 00000000000..488fb8db989
--- /dev/null
+++ b/rts/motoko-rts/src/stabilization/serialization/stable_memory_stream.rs
@@ -0,0 +1,147 @@
+//! Streamed read/write access to stable memory.
+//! Supporting Cheney's to-space in stable memory.
+
+use core::mem::{size_of, MaybeUninit};
+
+use crate::{
+    stabilization::{
+        deserialization::stable_memory_access::StableMemoryAccess, grant_stable_space,
+    },
+    stable_mem::{ic0_stable64_read, ic0_stable64_write},
+};
+
+/// Streamed reader/writer on stable memory.
+/// Used for the to-space during stabilization.
+///
+/// The memory supports two location-independent streams:
+/// * Streamed reading and updating, used for scanning and patching pointers.
+/// * Streamed writing, used for allocating new objects.
+pub struct StableMemoryStream {
+    /// The pointers used in the serialized stable memory layout are
+    /// relative to this start address of the to-space.
+    base_address: u64,
+    /// Used for reading and updating.
+    scan_address: u64,
+    /// Used for writing.
+    free_address: u64,
+}
+
+pub trait ScanStream {
+    // Determines whether the stream has reached the end.
+    fn scan_completed(&self) -> bool;
+    // Read a value from the stream.
+    fn read(&mut self) -> T;
+    // Read raw data from the stream.
+    fn raw_read(&mut self, data_address: usize, length: usize);
+    // Skip data in the stream.
+    fn skip(&mut self, length: usize);
+    // Overwrite the value right before the stream position.
+    fn update(&mut self, value: &T);
+    // Overwrite raw data right before the stream position.
+    fn raw_update(&mut self, data_address: usize, length: usize);
+}
+
+pub trait WriteStream {
+    // Append a value at the stream end.
+    fn write(&mut self, value: &T);
+    // Append raw data at the stream end.
+    fn raw_write(&mut self, data_address: usize, length: usize);
+}
+
+impl StableMemoryStream {
+    pub fn open(start_address: u64) -> StableMemoryStream {
+        StableMemoryStream {
+            base_address: start_address,
+            scan_address: start_address,
+            free_address: start_address,
+        }
+    }
+
+    pub fn close(&mut self) {
+        debug_assert!(self.scan_address <= self.free_address);
+    }
+
+    /// Start address of the serialized data in stable memory.
+    pub fn base_address(&self) -> u64 {
+        self.base_address
+    }
+
+    /// Size of this memory space. Also serves to determine the addresses of a subsequently
+    /// written object in the serialized stable memory format.
+    pub fn written_length(&self) -> u64 {
+        debug_assert!(self.base_address <= self.free_address);
+        self.free_address - self.base_address
+    }
+
+    pub fn scanned_length(&self) -> u64 {
+        debug_assert!(self.base_address <= self.scan_address);
+        self.scan_address - self.base_address
+    }
+
+    pub fn read_preceding(&self, offset: u64) -> T {
+        let length = self.free_address - self.base_address;
+        let access = StableMemoryAccess::open(self.base_address, length);
+        access.read::(offset)
+    }
+}
+
+impl ScanStream for StableMemoryStream {
+    fn scan_completed(&self) -> bool {
+        debug_assert!(self.scan_address <= self.free_address);
+        self.scan_address == self.free_address
+    }
+
+    fn read(&mut self) -> T {
+        let length = size_of::();
+        let mut value = unsafe { MaybeUninit::::uninit().assume_init() };
+        let value_address = &mut value as *mut T as usize;
+        self.raw_read(value_address, length);
+        value
+    }
+
+    fn raw_read(&mut self, data_address: usize, length: usize) {
+        debug_assert!(self.scan_address + length as u64 <= self.free_address);
+        unsafe {
+            ic0_stable64_read(data_address as u64, self.scan_address, length as u64);
+        }
+        self.scan_address += length as u64;
+    }
+
+    fn skip(&mut self, length: usize) {
+        debug_assert!(self.scan_address + length as u64 <= self.free_address);
+        self.scan_address += length as u64;
+    }
+
+    fn update(&mut self, value: &T) {
+        let length = size_of::();
+        let value_address = value as *const T as usize;
+        self.raw_update(value_address, length);
+    }
+
+    fn raw_update(&mut self, data_address: usize, length: usize) {
+        debug_assert!(length as u64 <= self.scan_address);
+        unsafe {
+            ic0_stable64_write(
+                self.scan_address - length as u64,
+                data_address as u64,
+                length as u64,
+            );
+        }
+    }
+}
+
+impl WriteStream for StableMemoryStream {
+    fn write(&mut self, value: &T) {
+        let length = size_of::();
+        let value_address = value as *const T as usize;
+        self.raw_write(value_address, length);
+    }
+
+    fn raw_write(&mut self, data_address: usize, length: usize) {
+        unsafe {
+            grant_stable_space(self.free_address + length as u64);
+            ic0_stable64_write(self.free_address, data_address as u64, length as u64);
+        }
+        self.free_address += length as u64;
+    }
+}
diff --git a/rts/motoko-rts/src/stable_mem.rs b/rts/motoko-rts/src/stable_mem.rs
index 29dc5d04c3d..40480e6d80d 100644
--- a/rts/motoko-rts/src/stable_mem.rs
+++ b/rts/motoko-rts/src/stable_mem.rs
@@ -1,47 +1,71 @@
+#[cfg(feature = "ic")]
+use motoko_rts_macros::enhanced_orthogonal_persistence;
+
 pub const PAGE_SIZE: u64 = 64 * 1024;
 
 extern "C" {
     // physical ic0_stable64 operations re-exported by moc
     pub fn ic0_stable64_write(offset: u64, src: u64, size: u64);
     pub fn ic0_stable64_read(dst: u64, offset: u64, size: u64);
+    /// Physical memory size.
+    pub fn ic0_stable64_size() -> u64;
+    /// Grow the physical memory by ignoring the compiler-specified stable memory limit.
+    pub fn ic0_stable64_grow(additional_pages: u64) -> u64;
+
+    // (virtual) stable_mem operations implemented by moc
+    #[cfg(feature = "ic")]
+    pub fn moc_stable_mem_get_version() -> usize;
+    #[cfg(feature = "ic")]
+    pub fn moc_stable_mem_set_version(version: usize);
+    /// Virtual memory size.
+    #[cfg(feature = "ic")]
+    pub fn moc_stable_mem_get_size() -> u64;
+    /// Initialize the virtual memory size.
+    #[cfg(feature = "ic")]
+    pub fn moc_stable_mem_set_size(pages: u64);
+    /// Grow the virtual memory by respecting the compiler-specified virtual memory limit.
     #[cfg(feature = "ic")]
-    pub fn ic0_stable64_size() -> u64; // physical memory size
-                                       // (virtual) stable_mem operations implemented by moc
-    pub fn moc_stable_mem_get_version() -> u32;
-    pub fn moc_stable_mem_set_version(version: u32);
-    pub fn moc_stable_mem_size() -> u64;
     pub fn moc_stable_mem_grow(additional_pages: u64) -> u64;
 }
 
-pub fn get_version() -> u32 {
+#[cfg(feature = "ic")]
+pub fn get_version() -> usize {
     unsafe { moc_stable_mem_get_version() }
 }
 
-pub fn set_version(version: u32) {
+#[cfg(feature = "ic")]
+pub fn set_version(version: usize) {
     unsafe { moc_stable_mem_set_version(version) }
 }
 
+/// Virtual memory size.
+#[cfg(feature = "ic")]
 pub fn size() -> u64 {
     // SAFETY: This is safe because of the ic0 api guarantees.
-    unsafe { moc_stable_mem_size() }
+    unsafe { moc_stable_mem_get_size() }
 }
 
+/// Grow the virtual memory by respecting the compiler-specified stable memory limit.
+#[cfg(feature = "ic")]
 pub fn grow(pages: u64) -> u64 {
     // SAFETY: This is safe because of the ic0 api guarantees.
     unsafe { moc_stable_mem_grow(pages) }
 }
 
+#[cfg(feature = "ic")]
 pub fn read(offset: u64, dst: &mut [u8]) {
     // SAFETY: This is safe because of the ic0 api guarantees.
     unsafe { ic0_stable64_read(dst.as_ptr() as u64, offset, dst.len() as u64) }
 }
 
+#[cfg(feature = "ic")]
 pub fn write(offset: u64, src: &[u8]) {
     // SAFETY: This is safe because of the ic0 api guarantees.
     unsafe { ic0_stable64_write(offset, src.as_ptr() as u64, src.len() as u64) }
 }
 
 // Little endian.
+#[cfg(feature = "ic")]
 pub fn read_u8(offset: u64) -> u8 {
     let mut res: [u8; 1] = [0; 1];
     read(offset, &mut res);
@@ -49,11 +73,13 @@ pub fn read_u8(offset: u64) -> u8 {
 }
 
 // Little endian.
+#[cfg(feature = "ic")]
 pub fn write_u8(offset: u64, value: u8) {
     write(offset, &core::primitive::u8::to_le_bytes(value));
 }
 
 // Little endian.
+#[cfg(feature = "ic")]
 pub fn read_u16(offset: u64) -> u16 {
     let mut res: [u8; 2] = [0; 2];
     read(offset, &mut res);
@@ -61,11 +87,13 @@ pub fn read_u16(offset: u64) -> u16 {
 }
 
 // Little endian.
+#[cfg(feature = "ic")]
 pub fn write_u16(offset: u64, value: u16) {
     write(offset, &core::primitive::u16::to_le_bytes(value));
 }
 
 // Little endian.
+#[cfg(feature = "ic")]
 pub fn read_u32(offset: u64) -> u32 {
     let mut res: [u8; 4] = [0; 4];
     read(offset, &mut res);
@@ -73,11 +101,13 @@ pub fn read_u32(offset: u64) -> u32 {
 }
 
 // Little endian.
+#[cfg(feature = "ic")]
 pub fn write_u32(offset: u64, value: u32) {
     write(offset, &core::primitive::u32::to_le_bytes(value));
 }
 
 // Little endian.
+#[cfg(feature = "ic")]
 pub fn read_u64(offset: u64) -> u64 {
     let mut res: [u8; 8] = [0; 8];
     read(offset, &mut res);
@@ -85,13 +115,15 @@ pub fn read_u64(offset: u64) -> u64 {
 }
 
 // Little endian.
+#[cfg(feature = "ic")]
 pub fn write_u64(offset: u64, n: u64) {
     write(offset, &core::primitive::u64::to_le_bytes(n));
 }
 
 #[cfg(feature = "ic")]
 #[no_mangle]
-pub extern "C" fn read_persistence_version() -> u32 {
+#[enhanced_orthogonal_persistence]
+pub extern "C" fn read_persistence_version() -> usize {
     use crate::region::{LEGACY_VERSION_NO_STABLE_MEMORY, VERSION_STABLE_HEAP_NO_REGIONS};
 
     let physical_pages = unsafe { ic0_stable64_size() };
@@ -104,7 +136,8 @@ pub extern "C" fn read_persistence_version() -> u32 {
         // It stores non-zero marker at address 0 -> Legacy version 0.
         return LEGACY_VERSION_NO_STABLE_MEMORY;
     }
+    // Note: Do not use `types::size_of()` as it rounds to 64-bit words.
     let address = physical_pages * PAGE_SIZE - core::mem::size_of::() as u64;
     let version = read_u32(address);
-    version
+    version as usize
 }
diff --git a/rts/motoko-rts/src/static_checks.rs b/rts/motoko-rts/src/static_checks.rs
index 99f8f8ae5eb..0ba21302b4d 100644
--- a/rts/motoko-rts/src/static_checks.rs
+++ b/rts/motoko-rts/src/static_checks.rs
@@ -1,36 +1,67 @@
 //! Compile-time assertions to make sure object layouts are as expected
 
-use crate::constants::*;
-use crate::types::*;
+use motoko_rts_macros::{
+    classical_persistence, enhanced_orthogonal_persistence, incremental_gc, is_incremental_gc,
+    non_incremental_gc, uses_enhanced_orthogonal_persistence,
+};
+
+use crate::{constants::MAX_ARRAY_LENGTH_FOR_ITERATOR, types::*};
 
 use core::mem::{align_of, size_of};
 
 // `_` suppresses "unused X" warnings so we don't get any warnings for the code below, but they use
 // `WORD_SIZE` so we get an "unused constant WORD_SIZE" warning without `allow(unused)` here.
 #[allow(unused)]
-const WORD_SIZE: usize = crate::constants::WORD_SIZE as usize;
+const WORD_SIZE: usize = crate::constants::WORD_SIZE;
 
 #[allow(unused)]
+#[incremental_gc]
 const HEADER_SIZE: usize = 2 * WORD_SIZE;
 
+#[allow(unused)]
+#[non_incremental_gc]
+const HEADER_SIZE: usize = WORD_SIZE;
+
 // We cannot use `assert_eq` below as `assert_eq` is not const yet
 
 // Check platform word size
+#[enhanced_orthogonal_persistence]
+const _: () = assert!(size_of::() == size_of::());
+
+#[classical_persistence]
 const _: () = assert!(size_of::() == size_of::());
+
 const _: () = assert!(size_of::() == WORD_SIZE);
 
 // Check that sizes of structs are as expected by the compiler
 // (Expectations are all over the place, e.g. `header_size` definitions in `compile.ml`, calls to `static_closure`, etc.)
 const _: () = assert!(size_of::() == HEADER_SIZE);
 const _: () = assert!(size_of::() == HEADER_SIZE + 2 * WORD_SIZE);
+const _: () = assert!(size_of::() == HEADER_SIZE + 1 * WORD_SIZE);
 const _: () = assert!(size_of::() == HEADER_SIZE + 1 * WORD_SIZE);
-const _: () = assert!(size_of::() == HEADER_SIZE + 4 * WORD_SIZE);
+
+const _: () = assert!(
+    size_of::()
+        == if uses_enhanced_orthogonal_persistence!() {
+            HEADER_SIZE + 3 * WORD_SIZE
+        } else {
+            HEADER_SIZE + 4 * WORD_SIZE
+        }
+); // Enhanced orthogonal persistence: Three 32-bit components, padding, followed by one 64-bit pointer in `mp_int`
+
 const _: () = assert!(size_of::() == HEADER_SIZE + 1 * WORD_SIZE);
 const _: () = assert!(size_of::() == HEADER_SIZE + 1 * WORD_SIZE);
 const _: () = assert!(size_of::() == HEADER_SIZE + 2 * WORD_SIZE);
 const _: () = assert!(size_of::() == HEADER_SIZE + 3 * WORD_SIZE);
-const _: () = assert!(size_of::() == HEADER_SIZE + 1 * WORD_SIZE);
-const _: () = assert!(size_of::() == HEADER_SIZE + 2 * WORD_SIZE);
+
+const _: () = assert!(
+    size_of::()
+        == if uses_enhanced_orthogonal_persistence!() {
+            HEADER_SIZE + 1 * WORD_SIZE
+        } else {
+            HEADER_SIZE + 2 * WORD_SIZE
+        }
+);
 
 // These aren't used generated by the compiler
 const _: () = assert!(size_of::() == 1 * WORD_SIZE);
@@ -42,12 +73,12 @@ const _: () = assert!(size_of::() == 2 * WORD_SIZE);
 const _: () = assert!(align_of::() == WORD_SIZE);
 const _: () = assert!(align_of::() == WORD_SIZE);
 const _: () = assert!(align_of::() == WORD_SIZE);
+const _: () = assert!(align_of::() == WORD_SIZE);
 const _: () = assert!(align_of::() == WORD_SIZE);
 const _: () = assert!(align_of::() == WORD_SIZE);
 const _: () = assert!(align_of::() == WORD_SIZE);
 const _: () = assert!(align_of::() == WORD_SIZE);
 const _: () = assert!(align_of::() == WORD_SIZE);
-const _: () = assert!(align_of::() == WORD_SIZE);
 const _: () = assert!(align_of::() == WORD_SIZE);
 const _: () = assert!(align_of::() == WORD_SIZE);
 const _: () = assert!(align_of::() == WORD_SIZE);
@@ -59,4 +90,7 @@ const _: () =
     assert!(TAG_ARRAY_I < TAG_ARRAY_M && TAG_ARRAY_M < TAG_ARRAY_T && TAG_ARRAY_T < TAG_ARRAY_S);
 // 2-bits suffice to encode base array tag in slice, remaining bits suffice to encode slice start.
 const _: () = assert!((TAG_ARRAY_S - TAG_ARRAY_I) / 2 < 4);
-const _: () = assert!(MAX_ARRAY_SIZE < (1 << (usize::BITS - 2)));
+const _: () = assert!(MAX_ARRAY_LENGTH_FOR_ITERATOR < (1 << (usize::BITS - 2)));
+
+// Check that the incremental GC is used with enhanced orthogonal persistence.
+const _: () = assert!(!uses_enhanced_orthogonal_persistence!() || is_incremental_gc!());
diff --git a/rts/motoko-rts/src/stream.rs b/rts/motoko-rts/src/stream.rs
new file mode 100644
index 00000000000..b23620ca434
--- /dev/null
+++ b/rts/motoko-rts/src/stream.rs
@@ -0,0 +1,216 @@
+//! The implementation of streaming serialisation.
+//! Only used for 32-bit classical persistence.
+//!
+//! When serialising Motoko stable variables to stable memory we used to first completely
+//! fill up an in-heap buffer and then copy that wholesale into stable memory. This can be
+//! disadvantageous for two reasons:
+//!  - double copying
+//!  - heap congestion (especially for the compacting collector)
+//!
+//! Instead now we'll only allocate a small(ish) blob that will serve as a temporary storage
+//! for bytes in transit, while bigger chunks will flush this staging area before being written
+//! directly to destination.
+//!
+//!
+
+// Layout of a stream node (32-bit only):
+//
+// ┌────────────┬─────┬─────────┬───────┬─────────┬─────────┬───────────┬────────┬──────────┐
+// │ obj header │ len │ padding | ptr64 │ start64 │ limit64 │ outputter │ filled │ cache... │
+// └────────────┴─────┴─────────┴───────┴─────────┴─────────┴───────────┴────────┴──────────┘
+//
+// We reuse the opaque nature of blobs (to Motoko) and stick Rust-related information
+// into the leading bytes:
+// - `obj header` contains tag (BLOB) and forwarding pointer
+// - `len` is in blob metadata
+// - `ptr64`, `start64`, and `limit64` are each represented as two 32-bit components
+//    in little endian encoding.
+// - `ptr64` and `limit64` are the next and past-end pointers into stable memory
+// - `filled` and `cache` are the number of bytes consumed from the blob, and the
+//   staging area of the stream, respectively
+// - `outputter` is the function to be called when `len - filled` approaches zero.
+// - INVARIANT: keep `BlobStream.{ptr64_field, start64_field, filled_field}`,
+//              (from `compile.ml`) in sync with the layout!
+// - Note: `len` and `filled` are relative to the encompassing blob.
+
+use crate::barriers::allocation_barrier;
+use crate::bigint::{check, mp_get_u32, mp_isneg, mp_iszero};
+use crate::mem_utils::memcpy_bytes;
+use crate::memory::{alloc_blob, Memory};
+use crate::rts_trap_with;
+use crate::tommath_bindings::{mp_div_2d, mp_int};
+use crate::types::{size_of, Blob, Bytes, Stream, Value, TAG_BLOB_B};
+
+use motoko_rts_macros::ic_mem_fn;
+
+/// NOTE: The stream implementation assumes 32-bit memory and has not been ported to 64-bit.
+/// It is not used by enhanced orthogonal persistence.
+const _: () = assert!(usize::BITS == 32);
+
+const MAX_STREAM_SIZE: Bytes = Bytes((1 << 30) - 1);
+const INITIAL_STREAM_FILLED: Bytes = Bytes(32);
+const STREAM_CHUNK_SIZE: Bytes = Bytes(128);
+
+#[ic_mem_fn]
+pub unsafe fn alloc_stream(mem: &mut M, size: Bytes) -> *mut Stream {
+    debug_assert_eq!(
+        INITIAL_STREAM_FILLED,
+        (size_of::() - size_of::()).to_bytes()
+    );
+    if size > MAX_STREAM_SIZE {
+        rts_trap_with("alloc_stream: Cache too large");
+    }
+    let ptr = alloc_blob(mem, TAG_BLOB_B, size + INITIAL_STREAM_FILLED);
+    let stream = ptr.as_stream();
+    stream.write_ptr64(0);
+    stream.write_start64(0);
+    stream.write_limit64(0);
+    (*stream).outputter = Stream::no_backing_store;
+    (*stream).filled = INITIAL_STREAM_FILLED;
+    allocation_barrier(ptr);
+    stream
+}
+
+#[allow(dead_code)]
+extern "C" {
+    // generated by `moc`
+    fn ic0_stable64_write(to: u64, ptr: u64, n: u64);
+}
+
+impl Stream {
+    #[inline]
+    pub unsafe fn cache_addr(self: *const Self) -> *const u8 {
+        self.add(1) as *const u8 // skip closure header
+    }
+
+    /// make sure that the cache is empty
+    fn flush(self: *mut Self) {
+        unsafe {
+            if (*self).filled > INITIAL_STREAM_FILLED {
+                ((*self).outputter)(
+                    self,
+                    self.cache_addr(),
+                    (*self).filled - INITIAL_STREAM_FILLED,
+                );
+                (*self).filled = INITIAL_STREAM_FILLED
+            }
+        }
+    }
+
+    fn no_backing_store(self: *mut Self, _ptr: *const u8, _n: Bytes) {
+        assert!(false)
+    }
+
+    #[cfg(feature = "ic")]
+    fn send_to_stable(self: *mut Self, ptr: *const u8, n: Bytes) {
+        unsafe {
+            let next_ptr64 = self.read_ptr64() + n.as_usize() as u64;
+            ic0_stable64_write(self.read_ptr64(), ptr as u64, n.as_usize() as u64);
+            self.write_ptr64(next_ptr64);
+        }
+    }
+
+    #[cfg(feature = "ic")]
+    /// Sets up the bottleneck routine to output towards a range of stable memory
+    /// Note: assumes that the entire byte range is writable
+    #[export_name = "stream_stable_dest"]
+    pub fn setup_stable_dest(self: *mut Self, start: u64, limit: u64) {
+        unsafe {
+            self.write_ptr64(start);
+            self.write_start64(start);
+            self.write_limit64(limit);
+            (*self).outputter = Self::send_to_stable;
+        }
+    }
+
+    /// Ingest a number of bytes into the stream.
+    #[export_name = "stream_write"]
+    pub fn cache_bytes(self: *mut Self, ptr: *const u8, n: Bytes) {
+        unsafe {
+            if self.read_limit64() != 0 && n > STREAM_CHUNK_SIZE
+                || (*self).filled + n > (*self).header.len
+            {
+                self.flush();
+                ((*self).outputter)(self, ptr, n);
+            } else {
+                let dest = self
+                    .as_blob_mut()
+                    .payload_addr()
+                    .add((*self).filled.as_usize());
+                (*self).filled += n;
+                assert!((*self).filled <= (*self).header.len);
+                memcpy_bytes(dest as usize, ptr as usize, n);
+            }
+        }
+    }
+
+    /// Ingest a single byte into the stream.
+    #[inline]
+    #[export_name = "stream_write_byte"]
+    pub fn cache_byte(self: *mut Self, byte: u8) {
+        unsafe {
+            if (*self).filled >= (*self).header.len {
+                self.flush()
+            }
+            self.as_blob_mut().set((*self).filled.as_usize(), byte);
+            (*self).filled += Bytes(1)
+        }
+    }
+
+    /// Return a pointer to a reserved area of the cache and advance the
+    /// fill indicator beyond it.
+    #[export_name = "stream_reserve"]
+    pub fn reserve(self: *mut Self, bytes: Bytes) -> *mut u8 {
+        unsafe {
+            if (*self).filled + bytes > (*self).header.len {
+                self.flush()
+            }
+            let ptr = self
+                .as_blob_mut()
+                .payload_addr()
+                .add((*self).filled.as_usize());
+            (*self).filled += bytes;
+            ptr
+        }
+    }
+
+    /// like `bigint_leb128_encode_go`, but to a stream
+    pub(crate) unsafe fn write_leb128(self: *mut Stream, tmp: *mut mp_int, add_bit: bool) {
+        debug_assert!(!mp_isneg(tmp));
+
+        loop {
+            let byte = mp_get_u32(tmp) as u8;
+            check(mp_div_2d(tmp, 7, tmp, core::ptr::null_mut()));
+            if !mp_iszero(tmp) || (add_bit && byte & (1 << 6) != 0) {
+                self.cache_byte(byte | (1 << 7));
+            } else {
+                return self.cache_byte(byte);
+            }
+        }
+    }
+
+    /// Split the stream object into two `Blob`s, a front-runner (small) one
+    /// and a latter one that comprises the current amount of the cached bytes.
+    /// Lengths are adjusted correspondingly.
+    #[export_name = "stream_split"]
+    pub unsafe fn split(self: *mut Self) -> Value {
+        if (*self).header.len > (*self).filled {
+            self.as_blob_mut().shrink((*self).filled);
+        }
+        (*self).header.len = INITIAL_STREAM_FILLED - size_of::().to_bytes();
+        (*self).filled -= INITIAL_STREAM_FILLED;
+        let blob = (self.cache_addr() as *mut Blob).sub(1);
+        (*blob).header.tag = TAG_BLOB_B;
+        let ptr = Value::from_ptr(blob as usize);
+        (*blob).header.init_forward(ptr);
+        debug_assert_eq!(blob.len(), (*self).filled);
+        allocation_barrier(ptr)
+    }
+
+    /// Shut down the stream by outputting all data. Lengths are
+    /// adjusted correspondingly, and the stream remains intact.
+    #[export_name = "stream_shutdown"]
+    pub unsafe fn shutdown(self: *mut Self) {
+        self.flush()
+    }
+}
diff --git a/rts/motoko-rts/src/text.rs b/rts/motoko-rts/src/text.rs
index 2f089aa08c8..3d1750ec9c7 100644
--- a/rts/motoko-rts/src/text.rs
+++ b/rts/motoko-rts/src/text.rs
@@ -35,17 +35,23 @@ use crate::types::{size_of, Blob, Bytes, Concat, Value, TAG_BLOB_T, TAG_CONCAT};
 use alloc::string::String;
 use core::cmp::{min, Ordering};
 use core::{slice, str};
+use motoko_rts_macros::classical_persistence;
+
+use crate::libc_declarations::memcmp;
+
+#[classical_persistence]
+use crate::types::Stream;
 
 use motoko_rts_macros::ic_mem_fn;
 
-const MAX_STR_SIZE: Bytes = Bytes((1 << 30) - 1);
+const MAX_STR_SIZE: Bytes = Bytes((1 << (usize::BITS - 2)) - 1);
 
 // Strings smaller than this must be blobs
 // Make this MAX_STR_SIZE to disable the use of ropes completely, e.g. for debugging
-const MIN_CONCAT_SIZE: Bytes = Bytes(9);
+const MIN_CONCAT_SIZE: Bytes = Bytes(9);
 
 // Note: Post allocation barrier needs to be applied after initilization.
-unsafe fn alloc_text_blob(mem: &mut M, size: Bytes) -> Value {
+unsafe fn alloc_text_blob(mem: &mut M, size: Bytes) -> Value {
     if size > MAX_STR_SIZE {
         rts_trap_with("alloc_text_blob: Text too large");
     }
@@ -53,7 +59,7 @@ unsafe fn alloc_text_blob(mem: &mut M, size: Bytes) -> Value {
 }
 
 #[ic_mem_fn]
-pub unsafe fn text_of_ptr_size(mem: &mut M, buf: *const u8, n: Bytes) -> Value {
+pub unsafe fn text_of_ptr_size(mem: &mut M, buf: *const u8, n: Bytes) -> Value {
     let blob = alloc_text_blob(mem, n);
     let payload_addr = blob.as_blob_mut().payload_addr();
     memcpy_bytes(payload_addr as usize, buf as usize, n);
@@ -61,7 +67,7 @@ pub unsafe fn text_of_ptr_size(mem: &mut M, buf: *const u8, n: Bytes<
 }
 
 pub unsafe fn text_of_str(mem: &mut M, s: &str) -> Value {
-    text_of_ptr_size(mem, s.as_ptr(), Bytes(s.len() as u32))
+    text_of_ptr_size(mem, s.as_ptr(), Bytes(s.len()))
 }
 
 #[ic_mem_fn]
@@ -152,7 +158,7 @@ unsafe extern "C" fn text_to_buf(mut s: Value, mut buf: *mut u8) {
             let s1_len = text_size(s1);
             let s2_len = text_size(s2);
 
-            if s2_len < Bytes(core::mem::size_of::() as u32) {
+            if s2_len < Bytes(core::mem::size_of::()) {
                 // If second string is smaller than size of a crumb just do it directly
                 text_to_buf(s2, buf.add(s1_len.as_usize()));
                 s = s1;
@@ -168,6 +174,24 @@ unsafe extern "C" fn text_to_buf(mut s: Value, mut buf: *mut u8) {
     }
 }
 
+#[no_mangle]
+#[classical_persistence]
+unsafe extern "C" fn stream_write_text(stream: *mut Stream, mut s: Value) {
+    use crate::types::TAG_BLOB_B;
+    loop {
+        let s_ptr = s.as_obj();
+        if s_ptr.tag() == TAG_BLOB_B || s_ptr.tag() == TAG_BLOB_T {
+            let blob = s_ptr.as_blob();
+            stream.cache_bytes(blob.payload_addr(), blob.len());
+            break;
+        } else {
+            let concat = s_ptr.as_concat();
+            stream_write_text(stream, concat.text1());
+            s = concat.text2()
+        }
+    }
+}
+
 // Straighten into contiguous memory, if needed (e.g. for system calls)
 #[ic_mem_fn]
 pub unsafe fn blob_of_text(mem: &mut M, s: Value) -> Value {
@@ -184,7 +208,7 @@ pub unsafe fn blob_of_text(mem: &mut M, s: Value) -> Value {
 
 /// Size of the text, in bytes
 #[no_mangle]
-pub unsafe extern "C" fn text_size(s: Value) -> Bytes {
+pub unsafe extern "C" fn text_size(s: Value) -> Bytes {
     // We don't know whether the string is a blob or concat, but both types have the length in same
     // location so using any of the types to get the length is fine
     // NB. We can't use `s.as_blob()` here as that method checks the tag in debug mode
@@ -195,10 +219,10 @@ pub unsafe extern "C" fn text_size(s: Value) -> Bytes {
 /// Compares texts from given offset on for the given number of bytes. All assumed to be in range.
 unsafe fn text_compare_range(
     s1: Value,
-    offset1: Bytes,
+    offset1: Bytes,
     s2: Value,
-    offset2: Bytes,
-    n: Bytes,
+    offset2: Bytes,
+    n: Bytes,
 ) -> Ordering {
     // Follow the left/right strings of concat nodes until we reach to blobs or concats that cannot
     // be split further (the range spans left and right strings)
@@ -244,7 +268,7 @@ unsafe fn text_compare_range(
         let s1_blob = s1_obj.as_blob();
         let s2_blob = s2_obj.as_blob();
 
-        let cmp = libc::memcmp(
+        let cmp = memcmp(
             s1_blob.payload_addr().add(offset1.as_usize()) as *const _,
             s2_blob.payload_addr().add(offset2.as_usize()) as *const _,
             n.as_usize(),
@@ -264,9 +288,9 @@ unsafe fn text_compare_range(
 /// split further (i.e. range spans left and right nodes). Returns a BLOB or CONCAT.
 unsafe fn text_get_range(
     mut s: Value,
-    mut offset: Bytes,
-    n: Bytes,
-) -> (Value, Bytes) {
+    mut offset: Bytes,
+    n: Bytes,
+) -> (Value, Bytes) {
     loop {
         let s_obj = s.as_obj();
 
@@ -299,7 +323,7 @@ unsafe fn text_get_range(
 }
 
 #[no_mangle]
-pub unsafe extern "C" fn text_compare(s1: Value, s2: Value) -> i32 {
+pub unsafe extern "C" fn text_compare(s1: Value, s2: Value) -> isize {
     let n1 = text_size(s1);
     let n2 = text_size(s2);
     let n = min(n1, n2);
@@ -320,14 +344,14 @@ pub unsafe extern "C" fn text_compare(s1: Value, s2: Value) -> i32 {
 }
 
 #[no_mangle]
-pub unsafe extern "C" fn blob_compare(s1: Value, s2: Value) -> i32 {
+pub unsafe extern "C" fn blob_compare(s1: Value, s2: Value) -> isize {
     let n1 = text_size(s1);
     let n2 = text_size(s2);
     let n = min(n1, n2);
 
     let payload1 = s1.as_blob().payload_const();
     let payload2 = s2.as_blob().payload_const();
-    let cmp = libc::memcmp(payload1 as *const _, payload2 as *const _, n.as_usize());
+    let cmp = memcmp(payload1 as *const _, payload2 as *const _, n.as_usize()) as isize;
 
     if cmp == 0 {
         if n1 < n2 {
@@ -344,7 +368,7 @@ pub unsafe extern "C" fn blob_compare(s1: Value, s2: Value) -> i32 {
 
 /// Length in characters
 #[no_mangle]
-pub unsafe extern "C" fn text_len(text: Value) -> u32 {
+pub unsafe extern "C" fn text_len(text: Value) -> usize {
     if text.tag() == TAG_BLOB_T {
         let blob = text.as_blob();
         let payload_addr = blob.payload_const();
@@ -355,7 +379,7 @@ pub unsafe extern "C" fn text_len(text: Value) -> u32 {
             len.as_usize(),
         ))
         .chars()
-        .count() as u32
+        .count()
     } else {
         let concat = text.as_concat();
         text_len(concat.text1()) + text_len(concat.text2())
@@ -363,14 +387,14 @@ pub unsafe extern "C" fn text_len(text: Value) -> u32 {
 }
 
 /// Decodes the character at the pointer. Returns the character, the size via the `size` parameter
-pub unsafe fn decode_code_point(s: *const u8, size: *mut u32) -> u32 {
+pub unsafe fn decode_code_point(s: *const u8, size: *mut usize) -> u32 {
     // 0xxxxxxx
     // 110xxxxx 10xxxxxx
     // 1110xxxx 10xxxxxx 10xxxxxx
     // 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
 
     let (size, mut value) = {
-        let leading_ones = (*s).leading_ones();
+        let leading_ones = (*s).leading_ones() as usize;
         if leading_ones == 0 {
             *size = 1;
             return *s as u32;
@@ -382,24 +406,24 @@ pub unsafe fn decode_code_point(s: *const u8, size: *mut u32) -> u32 {
 
     for i in 1..size {
         value <<= 6;
-        value += ((*s.add(i as usize)) & 0b00111111) as u32;
+        value += ((*s.add(i)) & 0b00111111) as u32;
     }
 
-    value
+    value as u32
 }
 
 /// Allocate a text from a character
 #[ic_mem_fn]
 pub unsafe fn text_singleton(mem: &mut M, char: u32) -> Value {
     let mut buf = [0u8; 4];
-    let str_len = char::from_u32_unchecked(char).encode_utf8(&mut buf).len() as u32;
+    let str_len = char::from_u32_unchecked(char).encode_utf8(&mut buf).len();
 
     let blob_ptr = alloc_text_blob(mem, Bytes(str_len));
 
     let blob = blob_ptr.as_blob_mut();
 
     for i in 0..str_len {
-        blob.set(i, buf[i as usize]);
+        blob.set(i, buf[i]);
     }
 
     allocation_barrier(blob_ptr)
@@ -429,7 +453,7 @@ where
     ));
     let string = to_string(&str);
     let bytes = string.as_bytes();
-    let lowercase = alloc_blob(mem, TAG_BLOB_T, Bytes(bytes.len() as u32));
+    let lowercase = alloc_blob(mem, TAG_BLOB_T, Bytes(bytes.len()));
     let mut i = 0;
     let target_ptr = lowercase.as_blob_mut().payload_addr();
     for b in bytes {
diff --git a/rts/motoko-rts/src/text_iter.rs b/rts/motoko-rts/src/text_iter.rs
index 3f51f68fb1d..d7b4acdab8d 100644
--- a/rts/motoko-rts/src/text_iter.rs
+++ b/rts/motoko-rts/src/text_iter.rs
@@ -18,8 +18,8 @@ use crate::types::{Value, TAG_ARRAY_T, TAG_BLOB_T, TAG_CONCAT};
 
 use motoko_rts_macros::ic_mem_fn;
 
-const TODO_TEXT_IDX: u32 = 0;
-const TODO_LINK_IDX: u32 = 1;
+const TODO_TEXT_IDX: usize = 0;
+const TODO_LINK_IDX: usize = 1;
 
 /// Find the left-most leaf of a text, putting all the others onto a list. Used to enforce the
 /// invariant about TEXT_ITER_BLOB to be a blob.
@@ -44,9 +44,9 @@ unsafe fn find_leaf(mem: &mut M, mut text: Value, todo: *mut Value) -
     text
 }
 
-const ITER_BLOB_IDX: u32 = 0;
-const ITER_POS_IDX: u32 = 1;
-const ITER_TODO_IDX: u32 = 2;
+const ITER_BLOB_IDX: usize = 0;
+const ITER_POS_IDX: usize = 1;
+const ITER_TODO_IDX: usize = 2;
 
 // Use non-pointer sentinel value as `null` to allow simpler visitor logic.
 // Anlogous to the design of `continuation_table` and `persistence`.
@@ -59,7 +59,7 @@ pub unsafe fn text_iter(mem: &mut M, text: Value) -> Value {
     let array = iter.as_array();
 
     // Initialize the TODO field first, to be able to use it use the location to `find_leaf`
-    let todo_addr = array.payload_addr().add(ITER_TODO_IDX as usize) as *mut _;
+    let todo_addr = array.payload_addr().add(ITER_TODO_IDX) as *mut _;
     *todo_addr = NO_OBJECT;
 
     // Initialize position field
@@ -76,13 +76,13 @@ pub unsafe fn text_iter(mem: &mut M, text: Value) -> Value {
 
 /// Returns whether the iterator is finished
 #[no_mangle]
-pub unsafe extern "C" fn text_iter_done(iter: Value) -> u32 {
+pub unsafe extern "C" fn text_iter_done(iter: Value) -> usize {
     let array = iter.as_array();
     let pos = array.get(ITER_POS_IDX).get_scalar();
     let blob = array.get(ITER_BLOB_IDX).as_blob();
     let todo = array.get(ITER_TODO_IDX);
 
-    if pos >= blob.len().as_u32() && todo == NO_OBJECT {
+    if pos >= blob.len().as_usize() && todo == NO_OBJECT {
         1
     } else {
         0
@@ -98,7 +98,7 @@ pub unsafe fn text_iter_next(mem: &mut M, iter: Value) -> u32 {
     let pos = iter_array.get(ITER_POS_IDX).get_scalar();
 
     // If we are at the end of the current blob, find the next blob
-    if pos >= blob.len().as_u32() {
+    if pos >= blob.len().as_usize() {
         let todo = iter_array.get(ITER_TODO_IDX);
 
         if todo == NO_OBJECT {
@@ -117,7 +117,7 @@ pub unsafe fn text_iter_next(mem: &mut M, iter: Value) -> u32 {
 
             todo_array.set(TODO_TEXT_IDX, (*concat).text2, mem);
             iter_array.set(ITER_POS_IDX, Value::from_scalar(0), mem);
-            let todo_addr = iter_array.payload_addr().add(ITER_TODO_IDX as usize);
+            let todo_addr = iter_array.payload_addr().add(ITER_TODO_IDX);
 
             iter_array.set(
                 ITER_BLOB_IDX,
@@ -140,8 +140,8 @@ pub unsafe fn text_iter_next(mem: &mut M, iter: Value) -> u32 {
     } else {
         // We are not at the end, read the next character from the blob
         let blob_payload = blob.payload_const();
-        let mut step: u32 = 0;
-        let char = decode_code_point(blob_payload.add(pos as usize), &mut step as *mut u32);
+        let mut step = 0;
+        let char = decode_code_point(blob_payload.add(pos), &mut step as *mut usize);
         iter_array.set(ITER_POS_IDX, Value::from_scalar(pos + step), mem);
         char
     }
diff --git a/rts/motoko-rts/src/tommath_bindings.rs b/rts/motoko-rts/src/tommath_bindings.rs
index 705acbf9739..bb723c19d53 100644
--- a/rts/motoko-rts/src/tommath_bindings.rs
+++ b/rts/motoko-rts/src/tommath_bindings.rs
@@ -1,2 +1,9 @@
 #![allow(unused, non_camel_case_types)]
-include!("../../_build/tommath_bindings.rs");
+
+use motoko_rts_macros::{classical_persistence, enhanced_orthogonal_persistence};
+
+#[classical_persistence]
+include!("../../_build/wasm32/tommath_bindings.rs");
+
+#[enhanced_orthogonal_persistence]
+include!("../../_build/wasm64/tommath_bindings.rs");
diff --git a/rts/motoko-rts/src/types.rs b/rts/motoko-rts/src/types.rs
index 2fc260650e1..929694c63e3 100644
--- a/rts/motoko-rts/src/types.rs
+++ b/rts/motoko-rts/src/types.rs
@@ -19,35 +19,38 @@
 // [1]: https://github.com/rust-lang/reference/blob/master/src/types/struct.md
 // [2]: https://doc.rust-lang.org/stable/reference/type-layout.html#the-c-representation
 
+use motoko_rts_macros::{
+    classical_persistence, enhanced_orthogonal_persistence, incremental_gc, is_incremental_gc,
+    non_incremental_gc,
+};
+
 use crate::barriers::{init_with_barrier, write_with_barrier};
 use crate::memory::Memory;
 use crate::tommath_bindings::{mp_digit, mp_int};
 use core::ops::{Add, AddAssign, Div, Mul, Sub, SubAssign};
 use core::ptr::null;
 
-use crate::constants::{MAX_ARRAY_SIZE, WORD_SIZE};
+use crate::constants::{MAX_ARRAY_LENGTH_FOR_ITERATOR, WORD_SIZE};
 use crate::rts_trap_with;
 
-pub fn size_of() -> Words {
-    Bytes(::core::mem::size_of::() as u32).to_words()
+pub fn size_of() -> Words {
+    Bytes(::core::mem::size_of::()).to_words()
 }
 
-/// The unit "words": `Words(123u32)` means 123 words.
+// TODO: Refactor by removing the generic type from `Words` and `Bytes`.
+
+/// The unit "words": `Words(123)` means 123 words.
 #[repr(transparent)]
 #[derive(PartialEq, Eq, Clone, Copy, PartialOrd, Ord)]
 pub struct Words(pub A);
 
-impl Words {
-    pub fn to_bytes(self) -> Bytes {
+impl Words {
+    pub fn to_bytes(self) -> Bytes {
         Bytes(self.0 * WORD_SIZE)
     }
 
-    pub fn as_u32(self) -> u32 {
-        self.0
-    }
-
     pub fn as_usize(self) -> usize {
-        self.0 as usize
+        self.0
     }
 }
 
@@ -95,30 +98,31 @@ impl SubAssign for Words {
     }
 }
 
-impl From> for Words {
-    fn from(bytes: Bytes) -> Words {
+impl From> for Words {
+    fn from(bytes: Bytes) -> Words {
         bytes.to_words()
     }
 }
 
-/// The unit "bytes": `Bytes(123u32)` means 123 bytes.
+/// The unit "bytes": `Bytes(123)` means 123 bytes.
 #[repr(transparent)]
 #[derive(Debug, PartialEq, Eq, Clone, Copy, PartialOrd, Ord)]
 pub struct Bytes(pub A);
 
-impl Bytes {
+impl Bytes {
     // Rounds up
-    pub fn to_words(self) -> Words {
+    pub fn to_words(self) -> Words {
         // Rust issue for adding ceiling_div: https://github.com/rust-lang/rfcs/issues/2844
         Words((self.0 + WORD_SIZE - 1) / WORD_SIZE)
     }
 
-    pub fn as_u32(self) -> u32 {
+    pub const fn as_usize(self) -> usize {
         self.0
     }
 
-    pub fn as_usize(self) -> usize {
-        self.0 as usize
+    /// Rust `next_multiple_of` is unstable, see https://github.com/rust-lang/rust/issues/88581.
+    pub fn next_multiple_of(self, multiple: usize) -> Self {
+        Bytes((self.0 + multiple - 1) / multiple * multiple)
     }
 }
 
@@ -150,24 +154,25 @@ impl SubAssign for Bytes {
     }
 }
 
-impl From> for Bytes {
-    fn from(words: Words) -> Bytes {
+impl From> for Bytes {
+    fn from(words: Words) -> Bytes {
         words.to_bytes()
     }
 }
 
 // The `true` value. The only scalar value that has the lowest bit set.
-pub const TRUE_VALUE: u32 = 0x1;
+pub const TRUE_VALUE: usize = 0x1;
 
 /// Constant sentinel pointer value for fast null tests.
 /// Points to the last unallocated Wasm page.
 /// See also `compile.ml` for other reserved sentinel values.
-pub const NULL_POINTER: Value = Value::from_raw(0xffff_fffb);
+#[enhanced_orthogonal_persistence]
+pub const NULL_POINTER: Value = Value::from_raw(0xffff_ffff_ffff_fffb);
 
 /// A value in a heap slot
 #[repr(transparent)]
 #[derive(Clone, Copy, PartialEq, Eq, Debug)]
-pub struct Value(u32);
+pub struct Value(usize);
 
 /// A view of `Value` for analyzing the slot contents.
 pub enum PtrOrScalar {
@@ -175,7 +180,7 @@ pub enum PtrOrScalar {
     Ptr(usize),
 
     /// Slot is an unboxed scalar value
-    Scalar(u32),
+    Scalar(usize),
 }
 
 impl PtrOrScalar {
@@ -193,26 +198,26 @@ impl Value {
     pub const fn from_ptr(ptr: usize) -> Self {
         // Cannot use `debug_assert_eq` in const yet, so using `debug_assert`
         debug_assert!(ptr & 0b1 == 0b0);
-        Value(skew(ptr) as u32)
+        Value(skew(ptr))
     }
 
     /// Create a value from a scalar
-    pub const fn from_scalar(value: u32) -> Self {
+    pub const fn from_scalar(value: usize) -> Self {
         // Cannot use `debug_assert_eq` in const yet, so using `debug_assert`
-        debug_assert!(value >> 31 == 0);
+        debug_assert!(value >> (usize::BITS - 1) == 0);
         Value(value << 1)
     }
 
     /// Create a value from a signed scalar. The scalar must be obtained with `get_signed_scalar`.
     /// Using `get_scalar` will return an incorrect scalar.
-    pub fn from_signed_scalar(value: i32) -> Self {
+    pub fn from_signed_scalar(value: isize) -> Self {
         debug_assert_eq!(value, value << 1 >> 1);
-        Value((value << 1) as u32)
+        Value((value << 1) as usize)
     }
 
     /// Create a value from raw representation. Useful when e.g. temporarily writing invalid values
     /// to object fields in garbage collection.
-    pub const fn from_raw(raw: u32) -> Self {
+    pub const fn from_raw(raw: usize) -> Self {
         Value(raw)
     }
 
@@ -225,7 +230,7 @@ impl Value {
     /// cycle.
     pub fn get(&self) -> PtrOrScalar {
         if is_ptr(self.0) {
-            PtrOrScalar::Ptr(unskew(self.0 as usize))
+            PtrOrScalar::Ptr(unskew(self.0))
         } else {
             PtrOrScalar::Scalar(self.0 >> 1)
         }
@@ -233,7 +238,7 @@ impl Value {
 
     /// Get the raw value
     #[inline]
-    pub fn get_raw(&self) -> u32 {
+    pub const fn get_raw(&self) -> usize {
         self.0
     }
 
@@ -242,56 +247,72 @@ impl Value {
         self.get().is_scalar()
     }
 
+    /// Is the value a pointer?
+    #[classical_persistence]
+    pub fn is_ptr(&self) -> bool {
+        self.get().is_ptr()
+    }
+
     /// Is the value a non-null pointer?
+    #[enhanced_orthogonal_persistence]
     pub fn is_non_null_ptr(&self) -> bool {
         self.get().is_ptr() && *self != NULL_POINTER
     }
 
     /// Assumes that the value is a scalar and returns the scalar value. In debug mode panics if
     /// the value is not a scalar.
-    pub fn get_scalar(&self) -> u32 {
+    pub fn get_scalar(&self) -> usize {
         debug_assert!(self.get().is_scalar());
         self.0 >> 1
     }
 
     /// Assumes that the value is a signed scalar and returns the scalar value. In debug mode
     /// panics if the value is not a scalar.
-    pub fn get_signed_scalar(&self) -> i32 {
+    pub fn get_signed_scalar(&self) -> isize {
         debug_assert!(self.get().is_scalar());
-        self.0 as i32 >> 1
+        self.0 as isize >> 1
     }
 
     /// Assumes that the value is a pointer and returns the pointer value. In debug mode panics if
     /// the value is not a pointer.
     pub fn get_ptr(self) -> usize {
         debug_assert!(self.get().is_ptr());
-        unskew(self.0 as usize)
+        unskew(self.0)
     }
 
     /// Check that the forwarding pointer is valid.
     #[inline]
     pub unsafe fn check_forwarding_pointer(self) {
-        debug_assert!(
-            self.forward().get_ptr() == self.get_ptr()
-                || self.forward().forward().get_ptr() == self.forward().get_ptr()
-        );
+        if is_incremental_gc!() {
+            debug_assert!(
+                self.forward().get_ptr() == self.get_ptr()
+                    || self.forward().forward().get_ptr() == self.forward().get_ptr()
+            );
+        }
     }
 
     /// Check whether the object's forwarding pointer refers to a different location.
     pub unsafe fn is_forwarded(self) -> bool {
-        self.check_forwarding_pointer();
-        self.forward().get_ptr() != self.get_ptr()
+        if is_incremental_gc!() {
+            self.check_forwarding_pointer();
+            self.forward().get_ptr() != self.get_ptr()
+        } else {
+            false
+        }
     }
 
     /// Get the object tag. No forwarding. Can be applied to any block, regular objects
     /// with a header as well as `OneWordFiller`, `FwdPtr`, and `FreeSpace`.
     /// In debug mode panics if the value is not a pointer.
     pub unsafe fn tag(self) -> Tag {
+        #[enhanced_orthogonal_persistence]
         debug_assert_ne!(self, NULL_POINTER);
+
         *(self.get_ptr() as *const Tag)
     }
 
     /// Get the forwarding pointer. Used by the incremental GC.
+    #[incremental_gc]
     pub unsafe fn forward(self) -> Value {
         debug_assert!(self.is_obj());
         debug_assert!(self.get_ptr() as *const Obj != null());
@@ -299,7 +320,25 @@ impl Value {
         (*obj).forward
     }
 
+    /// Get the forwarding pointer. Used without the incremental GC.
+    #[non_incremental_gc]
+    pub unsafe fn forward(self) -> Value {
+        self
+    }
+
     /// Resolve forwarding if the value is a pointer. Otherwise, return the same value.
+    #[classical_persistence]
+    pub unsafe fn forward_if_possible(self) -> Value {
+        // Second condition: Ignore raw null addresses used in `text_iter`.
+        if is_incremental_gc!() && self.is_ptr() && self.get_ptr() as *const Obj != null() {
+            self.forward()
+        } else {
+            self
+        }
+    }
+
+    /// Resolve forwarding if the value is a pointer. Otherwise, return the same value.
+    #[enhanced_orthogonal_persistence]
     pub unsafe fn forward_if_possible(self) -> Value {
         // Second condition: Ignore raw null addresses used in `text_iter`.
         if self.is_non_null_ptr() && self.get_ptr() as *const Obj != null() {
@@ -351,7 +390,7 @@ impl Value {
 
     /// Get the pointer as `Object` using forwarding. In debug mode panics if the value is not a pointer.
     pub unsafe fn as_object(self) -> *mut Object {
-        debug_assert!(self.get().is_ptr());
+        debug_assert!(self.tag() == TAG_OBJECT);
         self.check_forwarding_pointer();
         self.forward().get_ptr() as *mut Object
     }
@@ -363,6 +402,16 @@ impl Value {
         self.forward().get_ptr() as *mut Region
     }
 
+    /// Get the pointer as `Stream` using forwarding, which is a glorified `Blob`.
+    /// In debug mode panics if the value is not a pointer or the
+    /// pointed object is not a `Blob`.
+    #[classical_persistence]
+    pub unsafe fn as_stream(self) -> *mut Stream {
+        debug_assert_eq!(self.tag(), TAG_BLOB_B);
+        self.check_forwarding_pointer();
+        self.forward().get_ptr() as *mut Stream
+    }
+
     /// Get the pointer as `Region` using forwarding, without checking the tag.
     /// NB: One cannot check the tag during stabilization.
     pub unsafe fn as_untagged_region(self) -> *mut Region {
@@ -401,29 +450,38 @@ impl Value {
         self.forward().get_ptr() as *mut BigInt
     }
 
-    pub fn as_tiny(self) -> i32 {
+    pub fn as_tiny(self) -> isize {
         debug_assert!(self.is_scalar());
-        self.0 as i32 >> 1
+        self.0 as isize >> 1
     }
 
     // optimized version of `value.is_non_null_ptr() && value.get_ptr() >= address`
     // value is a non-null pointer equal or greater than the unskewed address > 1
     #[inline]
+    #[enhanced_orthogonal_persistence]
+    pub fn points_to_or_beyond(&self, address: usize) -> bool {
+        debug_assert!(address > TRUE_VALUE);
+        let raw = self.get_raw();
+        is_skewed(raw) && unskew(raw) >= address && *self != NULL_POINTER
+    }
+
+    #[inline]
+    #[classical_persistence]
     pub fn points_to_or_beyond(&self, address: usize) -> bool {
-        debug_assert!(address > TRUE_VALUE as usize);
+        debug_assert!(address > TRUE_VALUE);
         let raw = self.get_raw();
-        is_skewed(raw) && unskew(raw as usize) >= address && *self != NULL_POINTER
+        is_skewed(raw) && unskew(raw) >= address
     }
 }
 
 #[inline]
 /// Returns whether a raw value is representing a pointer. Useful when using `Value::get_raw`.
-pub fn is_ptr(value: u32) -> bool {
+pub fn is_ptr(value: usize) -> bool {
     is_skewed(value) && value != TRUE_VALUE
 }
 
 #[inline]
-pub const fn is_skewed(value: u32) -> bool {
+pub const fn is_skewed(value: usize) -> bool {
     value & 0b1 != 0
 }
 
@@ -440,10 +498,11 @@ pub const fn unskew(value: usize) -> usize {
 // NOTE: We don't create an enum for tags as we can never assume to do exhaustive pattern match on
 // tags, because of heap corruptions and other bugs (in the code generator or RTS, or maybe because
 // of an unsafe API usage).
-pub type Tag = u32;
+pub type Tag = usize;
 
 // Tags need to have the lowest bit set, to allow distinguishing a header (tag) from object
-// locations in mark-compact GC. (Reminder: objects and fields are word aligned)
+// locations in mark-compact GC. (Reminder: objects and fields are word aligned).
+// Odd tag numbers are expected by the mark-compact GC (for pointer threading).
 pub const TAG_OBJECT: Tag = 1;
 pub const TAG_ARRAY_I: Tag = 3; // Immutable Array ([T])
 pub const TAG_ARRAY_M: Tag = 5; // Mutable Array ([var T])
@@ -460,15 +519,15 @@ pub const TAG_BLOB_B: Tag = 25; // Blob of Bytes (Blob)
 pub const TAG_BLOB_T: Tag = 27; // Blob of Utf8 (Text)
 pub const TAG_BLOB_P: Tag = 29; // Principal (Principal)
 pub const TAG_BLOB_A: Tag = 31; // Actor (actor {})
-pub const TAG_FWD_PTR: Tag = 33; // Only used by the copying GC - not to be confused with forwarding pointer in the header used for incremental GC.
-pub const TAG_BITS32_U: Tag = 35; // Unsigned (Nat32)
-pub const TAG_BITS32_S: Tag = 37; // Signed (Int32)
-pub const TAG_BITS32_F: Tag = 39; // Reserved (Float32)
-pub const TAG_BIGINT: Tag = 41;
-pub const TAG_CONCAT: Tag = 43;
-pub const TAG_REGION: Tag = 45;
-pub const TAG_ONE_WORD_FILLER: Tag = 47;
-pub const TAG_FREE_SPACE: Tag = 49;
+pub const TAG_FWD_PTR: Tag = 33; // Used by graph copy stabilization and the copying GC - not to be confused with the incremental GC's forwarding pointer.
+pub const TAG_BIGINT: Tag = 35;
+pub const TAG_CONCAT: Tag = 37;
+pub const TAG_REGION: Tag = 39;
+
+#[enhanced_orthogonal_persistence]
+pub const TAG_ONE_WORD_FILLER: Tag = 41;
+#[enhanced_orthogonal_persistence]
+pub const TAG_FREE_SPACE: Tag = 43;
 
 // Special value to visit only a range of array fields.
 // This and all values above it are reserved and mean
@@ -476,14 +535,40 @@ pub const TAG_FREE_SPACE: Tag = 49;
 // purposes of `visit_pointer_fields`.
 // The top two bits encode the original array tag, the remaining bits are the start index of the slice.
 // Invariant: the value of this (pseudo-)tag must be
-//            higher than all other tags defined above
-pub const TAG_ARRAY_SLICE_MIN: Tag = 50;
+//            higher than all other tags defined above.
+// Note: The minimum value can be even, as it only denotes
+// a lower boundary to distinguish slice information from
+// the actual tag values.
+#[enhanced_orthogonal_persistence]
+pub const TAG_ARRAY_SLICE_MIN: Tag = 44;
+
 pub const TAG_SPACING: Tag = 2;
 
+#[classical_persistence]
+pub const TAG_BITS32_U: Tag = 41;
+#[classical_persistence]
+pub const TAG_BITS32_S: Tag = 43;
+#[classical_persistence]
+pub const TAG_BITS32_F: Tag = 45;
+#[classical_persistence]
+pub const TAG_NULL: Tag = 47;
+#[classical_persistence]
+pub const TAG_ONE_WORD_FILLER: Tag = 49;
+#[classical_persistence]
+pub const TAG_FREE_SPACE: Tag = 51;
+#[classical_persistence]
+pub const TAG_ARRAY_SLICE_MIN: Tag = 52;
+
+#[enhanced_orthogonal_persistence]
 pub fn is_object_tag(tag: Tag) -> bool {
     tag >= TAG_OBJECT && tag <= TAG_REGION
 }
 
+#[classical_persistence]
+pub fn is_object_tag(tag: Tag) -> bool {
+    tag >= TAG_OBJECT && tag <= TAG_NULL
+}
+
 pub fn is_blob_tag(tag: Tag) -> bool {
     tag == TAG_BLOB_B || tag == TAG_BLOB_T || tag == TAG_BLOB_P || tag == TAG_BLOB_A
 }
@@ -497,7 +582,7 @@ pub fn is_array_or_slice_tag(tag: Tag) -> bool {
 }
 
 #[inline]
-pub fn start_of_slice(tag: Tag) -> u32 {
+pub fn start_of_slice(tag: Tag) -> usize {
     tag << 2 >> 2
 }
 
@@ -506,14 +591,16 @@ pub fn tag_of_slice(tag: Tag) -> Tag {
     TAG_ARRAY_I + (tag >> (usize::BITS - 2)) * TAG_SPACING
 }
 
-pub fn slice_tag(array_tag: Tag, slice_start: u32) -> Tag {
+pub fn slice_tag(array_tag: Tag, slice_start: usize) -> Tag {
     debug_assert!(is_base_array_tag(array_tag));
-    debug_assert!(slice_start >= TAG_ARRAY_SLICE_MIN && slice_start <= MAX_ARRAY_SIZE);
+    debug_assert!(
+        slice_start >= TAG_ARRAY_SLICE_MIN && slice_start <= MAX_ARRAY_LENGTH_FOR_ITERATOR
+    );
     debug_assert!((array_tag - TAG_ARRAY_I) % TAG_SPACING == 0);
     (((array_tag - TAG_ARRAY_I) / TAG_SPACING) << (usize::BITS - 2)) | slice_start
 }
 
-pub fn slice_start(tag: Tag) -> (Tag, u32) {
+pub fn slice_start(tag: Tag) -> (Tag, usize) {
     debug_assert!(is_array_or_slice_tag(tag));
     if tag >= TAG_ARRAY_SLICE_MIN {
         (tag_of_slice(tag), start_of_slice(tag))
@@ -537,20 +624,37 @@ pub fn base_array_tag(tag: Tag) -> Tag {
 #[repr(C)] // See the note at the beginning of this module
 pub struct Obj {
     pub tag: Tag,
+    // Cannot use `#[incremental_gc]` as Rust only allows non-macro attributes for fields.
+    #[cfg(feature = "incremental_gc")]
     /// Forwarding pointer to support object moving in the incremental GC.
     pub forward: Value,
 }
 
 impl Obj {
+    #[enhanced_orthogonal_persistence]
+    pub fn new(tag: Tag, forward: Value) -> Obj {
+        Obj { tag, forward }
+    }
+
+    #[incremental_gc]
     pub fn init_forward(&mut self, value: Value) {
         self.forward = value;
     }
 
+    #[non_incremental_gc]
+    pub fn init_forward(&mut self, _value: Value) {}
+
     /// Check whether the object's forwarding pointer refers to a different location.
+    #[incremental_gc]
     pub unsafe fn is_forwarded(self: *const Self) -> bool {
         (*self).forward.get_ptr() != self as usize
     }
 
+    #[non_incremental_gc]
+    pub unsafe fn is_forwarded(self: *const Self) -> bool {
+        false
+    }
+
     pub unsafe fn tag(self: *const Self) -> Tag {
         (*self).tag
     }
@@ -570,9 +674,9 @@ impl Obj {
 #[repr(C)] // See the note at the beginning of this module
 pub struct Array {
     pub header: Obj,
-    pub len: u32, // number of elements
+    pub len: usize, // number of elements
 
-    // Array elements follow, each u32 sized. We can't have variable-sized structs in Rust so we
+    // Array elements follow, each of `usize` width. We can't have variable-sized structs in Rust so we
     // can't add a field here for the elements.
     // https://doc.rust-lang.org/nomicon/exotic-sizes.html
 }
@@ -582,7 +686,7 @@ impl Array {
         self.offset(1) as *mut Value // skip array header
     }
 
-    pub unsafe fn get(self: *mut Self, idx: u32) -> Value {
+    pub unsafe fn get(self: *mut Self, idx: usize) -> Value {
         let slot_addr = self.element_address(idx);
         *(slot_addr as *const Value)
     }
@@ -591,7 +695,7 @@ impl Array {
     /// Uses a generational post-update barrier on pointer writes.
     /// No incremental pre-update barrier as the previous value is undefined.
     /// Resolve pointer forwarding for the written value if necessary.
-    pub unsafe fn initialize(self: *mut Self, idx: u32, value: Value, mem: &mut M) {
+    pub unsafe fn initialize(self: *mut Self, idx: usize, value: Value, mem: &mut M) {
         let slot_addr = self.element_address(idx) as *mut Value;
         init_with_barrier(mem, slot_addr, value);
     }
@@ -600,18 +704,31 @@ impl Array {
     /// The written and overwritten value can be a scalar or a pointer.
     /// Applies an incremental pre-update barrier when needed.
     /// Resolves pointer forwarding for the written value.
-    pub unsafe fn set(self: *mut Self, idx: u32, value: Value, mem: &mut M) {
+    pub unsafe fn set(self: *mut Self, idx: usize, value: Value, mem: &mut M) {
         let slot_addr = self.element_address(idx) as *mut Value;
         write_with_barrier(mem, slot_addr, value);
     }
 
+    /// Write a scalar value to an array element.
+    /// No need for a write barrier.
+    pub unsafe fn set_scalar(self: *mut Self, idx: usize, value: Value) {
+        debug_assert!(value.is_scalar());
+        self.set_raw(idx, value);
+    }
+
+    /// Note: Only directly used by graph destabilization. No write barrier is applied.
+    pub unsafe fn set_raw(self: *mut Self, idx: usize, value: Value) {
+        let slot_addr = self.element_address(idx);
+        *(slot_addr as *mut Value) = value;
+    }
+
     #[inline]
-    unsafe fn element_address(self: *const Self, idx: u32) -> usize {
+    unsafe fn element_address(self: *const Self, idx: usize) -> usize {
         debug_assert!(self.len() > idx);
-        self.payload_addr() as usize + (idx * WORD_SIZE) as usize
+        self.payload_addr() as usize + idx * WORD_SIZE
     }
 
-    pub unsafe fn len(self: *const Self) -> u32 {
+    pub unsafe fn len(self: *const Self) -> usize {
         (*self).len
     }
 
@@ -619,11 +736,11 @@ impl Array {
         base_array_tag((*self).header.tag)
     }
 
-    pub unsafe fn get_slice_start(self: *const Self) -> (Tag, u32) {
+    pub unsafe fn get_slice_start(self: *const Self) -> (Tag, usize) {
         slice_start((*self).header.tag)
     }
 
-    pub unsafe fn set_slice_start(self: *mut Self, array_tag: Tag, start: u32) {
+    pub unsafe fn set_slice_start(self: *mut Self, array_tag: Tag, start: usize) {
         debug_assert!(is_base_array_tag(array_tag));
         (*self).header.tag = slice_tag(array_tag, start)
     }
@@ -636,15 +753,38 @@ impl Array {
 
 #[rustfmt::skip]
 #[repr(C)] // See the note at the beginning of this module
+#[enhanced_orthogonal_persistence]
+pub struct Region {
+    pub header: Obj,
+    pub id: u64,
+    pub page_count: usize,
+    pub vec_pages: Value, // Blob of u16's (each a page block ID).
+}
+
+#[enhanced_orthogonal_persistence]
+impl Region {
+    pub unsafe fn write_id64(self: *mut Self, value: u64) {
+        (*self).id = value;
+    }
+
+    pub unsafe fn read_id64(self: *mut Self) -> u64 {
+        (*self).id
+    }
+}
+
+#[rustfmt::skip]
+#[repr(C)] // See the note at the beginning of this module
+#[classical_persistence]
 pub struct Region {
     pub header: Obj,
     // 64-bit id split into lower and upper halves for alignment reasons
     pub id_lower: u32,
     pub id_upper: u32,
-    pub page_count: u32,
+    pub page_count: usize,
     pub vec_pages: Value, // Blob of u16's (each a page block ID).
 }
 
+#[classical_persistence]
 impl Region {
     pub unsafe fn write_id64(self: *mut Self, value: u64) {
         write64(&mut (*self).id_lower, &mut (*self).id_upper, value);
@@ -656,12 +796,22 @@ impl Region {
 }
 
 #[repr(C)] // See the note at the beginning of this module
+#[enhanced_orthogonal_persistence]
 pub struct Object {
     pub header: Obj,
     pub hash_blob: Value, // Pointer to a blob containing the hashes of the object field labels.
 }
 
+#[repr(C)] // See the note at the beginning of this module
+#[classical_persistence]
+pub struct Object {
+    pub header: Obj,
+    pub size: usize,     // Number of elements
+    pub hash_ptr: usize, // Pointer to static information about object field labels. Not important for GC (does not contain pointers).
+}
+
 impl Object {
+    #[enhanced_orthogonal_persistence]
     pub unsafe fn hash_blob_addr(self: *mut Self) -> *mut Value {
         &mut (*self).hash_blob
     }
@@ -671,24 +821,30 @@ impl Object {
     }
 
     /// Number of fields in the object.
-    pub(crate) unsafe fn size(self: *mut Self) -> u32 {
-        let hash_blob_length = (*self).hash_blob.as_blob().len().as_u32();
+    #[enhanced_orthogonal_persistence]
+    pub(crate) unsafe fn size(self: *mut Self) -> usize {
+        let hash_blob_length = (*self).hash_blob.as_blob().len().as_usize();
         debug_assert_eq!(hash_blob_length % WORD_SIZE, 0);
         hash_blob_length / WORD_SIZE
     }
 
-    #[cfg(debug_assertions)]
-    pub(crate) unsafe fn get(self: *mut Self, idx: u32) -> Value {
-        *self.payload_addr().add(idx as usize)
+    #[classical_persistence]
+    pub(crate) unsafe fn size(self: *mut Self) -> usize {
+        (*self).size
+    }
+
+    #[allow(unused)]
+    pub(crate) unsafe fn get(self: *mut Self, idx: usize) -> Value {
+        *self.payload_addr().add(idx)
     }
 }
 
 #[repr(C)] // See the note at the beginning of this module
 pub struct Closure {
     pub header: Obj,
-    pub funid: u32,
-    pub size: u32, // number of elements
-                   // other stuff follows ...
+    pub funid: usize,
+    pub size: usize, // number of elements
+                     // other stuff follows ...
 }
 
 impl Closure {
@@ -696,7 +852,7 @@ impl Closure {
         self.offset(1) as *mut Value // skip closure header
     }
 
-    pub(crate) unsafe fn size(self: *mut Self) -> u32 {
+    pub(crate) unsafe fn size(self: *mut Self) -> usize {
         (*self).size
     }
 }
@@ -704,7 +860,7 @@ impl Closure {
 #[repr(C)] // See the note at the beginning of this module
 pub struct Blob {
     pub header: Obj,
-    pub len: Bytes<u32>,
+    pub len: Bytes,
     // data follows ..
 }
 
@@ -717,16 +873,16 @@ impl Blob {
         self.add(1) as *mut u8 // skip blob header
     }
 
-    pub unsafe fn len(self: *const Self) -> Bytes<u32> {
+    pub unsafe fn len(self: *const Self) -> Bytes {
         (*self).len
     }
 
-    pub unsafe fn get(self: *const Self, idx: u32) -> u8 {
-        *self.payload_const().add(idx as usize)
+    pub unsafe fn get(self: *const Self, idx: usize) -> u8 {
+        *self.payload_const().add(idx)
     }
 
-    pub unsafe fn set(self: *mut Self, idx: u32, byte: u8) {
-        *self.payload_addr().add(idx as usize) = byte;
+    pub unsafe fn set(self: *mut Self, idx: usize, byte: u8) {
+        *self.payload_addr().add(idx) = byte;
     }
 
     pub unsafe fn payload_addr_u16(self: *mut Self) -> *mut u16 {
@@ -737,16 +893,16 @@ impl Blob {
         self.add(1) as *mut u16 // skip blob header
     }
 
-    pub unsafe fn get_u16(self: *const Self, idx: u32) -> u16 {
-        *self.payload_const_u16().add(idx as usize)
+    pub unsafe fn get_u16(self: *const Self, idx: usize) -> u16 {
+        *self.payload_const_u16().add(idx)
     }
 
-    pub unsafe fn set_u16(self: *mut Self, idx: u32, value: u16) {
-        *self.payload_addr_u16().add(idx as usize) = value;
+    pub unsafe fn set_u16(self: *mut Self, idx: usize, value: u16) {
+        *self.payload_addr_u16().add(idx) = value;
     }
 
     /// Shrink blob to the given size. Slop after the new size is filled with filler objects.
-    pub unsafe fn shrink(self: *mut Self, new_len: Bytes<u32>) {
+    pub unsafe fn shrink(self: *mut Self, new_len: Bytes) {
         let current_len_words = self.len().to_words();
         let new_len_words = new_len.to_words();
 
@@ -755,13 +911,13 @@ impl Blob {
         let slop = current_len_words - new_len_words;
 
         if slop == Words(1) {
-            let filler = (self.payload_addr() as *mut u32).add(new_len_words.as_usize())
+            let filler = (self.payload_addr() as *mut usize).add(new_len_words.as_usize())
                 as *mut OneWordFiller;
             (*filler).tag = TAG_ONE_WORD_FILLER;
         } else if slop != Words(0) {
             debug_assert!(slop >= size_of::<FreeSpace>());
             let filler =
-                (self.payload_addr() as *mut u32).add(new_len_words.as_usize()) as *mut FreeSpace;
+                (self.payload_addr() as *mut usize).add(new_len_words.as_usize()) as *mut FreeSpace;
             (*filler).tag = TAG_FREE_SPACE;
             (*filler).words = slop - size_of::<FreeSpace>();
         }
@@ -770,14 +926,76 @@ impl Blob {
     }
 }
 
-// Note: Do not declare 64-bit fields, as otherwise, the objects are expected to be 64-bit aligned.
-// This is not the case in the current heap design.
-// Moreover, fields would also get 64-bit aligned causing implicit paddding.
+/// NOTE: The stream is not used by enhanced orthogonal persistence and is designed for 32-bit only.
+/// Do not declare 64-bit fields for 32-bit stream, as otherwise, the objects are expected to be 64-bit
+/// aligned which is not the case in 32-bit heap. Moreover, fields would also get 64-bit aligned causing
+/// implicit padding on 32-bit memory.
+#[repr(C)] // See the note at the beginning of this module
+#[classical_persistence]
+pub struct Stream {
+    pub header: Blob,
+
+    /// Components of the 64-bit `ptr` value. Little-endian encoding.
+    /// Use `read_ptr64()` and `write_ptr64()` to access.
+    pub ptr_lower: u32,
+    pub ptr_upper: u32,
+
+    /// Components of the 64-bit `start` value. Little-endian encoding.
+    /// Use `read_start64()` and `write_start64()` to access.
+    pub start_lower: u32,
+    pub start_upper: u32,
+
+    /// Components of the 64-bit `limit` value. Little-endian encoding.
+    /// Use `read_limit64()` and `write_limit64()` to access.
+    pub limit_lower: u32,
+    pub limit_upper: u32,
+
+    pub outputter: fn(*mut Self, *const u8, Bytes) -> (),
+    pub filled: Bytes, // cache data follows ..
+}
+
+#[classical_persistence]
+impl Stream {
+    pub unsafe fn is_forwarded(self: *const Self) -> bool {
+        (self as *const Obj).is_forwarded()
+    }
+
+    pub unsafe fn as_blob_mut(self: *mut Self) -> *mut Blob {
+        debug_assert!(!self.is_forwarded());
+        self as *mut Blob
+    }
+
+    pub unsafe fn write_ptr64(self: *mut Self, value: u64) {
+        write64(&mut (*self).ptr_lower, &mut (*self).ptr_upper, value);
+    }
+
+    pub unsafe fn read_ptr64(self: *const Self) -> u64 {
+        read64((*self).ptr_lower, (*self).ptr_upper)
+    }
+
+    pub unsafe fn write_start64(self: *mut Self, value: u64) {
+        write64(&mut (*self).start_lower, &mut (*self).start_upper, value);
+    }
+
+    pub unsafe fn read_start64(self: *const Self) -> u64 {
+        read64((*self).start_lower, (*self).start_upper)
+    }
+
+    pub unsafe fn write_limit64(self: *mut Self, value: u64) {
+        write64(&mut (*self).limit_lower, &mut (*self).limit_upper, value);
+    }
+
+    pub unsafe fn read_limit64(self: *const Self) -> u64 {
+        read64((*self).limit_lower, (*self).limit_upper)
+    }
+}
 
+#[classical_persistence]
 pub fn read64(lower: u32, upper: u32) -> u64 {
     ((upper as u64) << u32::BITS) | lower as u64
 }
 
+#[classical_persistence]
 pub fn write64(lower: &mut u32, upper: &mut u32, value: u64) {
     *upper = (value >> u32::BITS) as u32;
     *lower = (value & u32::MAX as u64) as u32;
@@ -799,25 +1017,57 @@ pub struct BigInt {
     /// The data pointer (mp_int.dp) is irrelevant, and will be changed to point to
     /// the data within this object before it is used.
     /// (NB: If we have a non-moving GC, we can make this an invariant)
+    /// NOTE: `mp_int` originates from Tom's math library implementation.
+    /// Layout in 64-bit memory (with enhanced orthogonal persistence):
+    /// ```
+    /// pub struct mp_int { // Total size 24
+    ///   pub used: c_int, // Offset 0, size 4
+    ///   pub alloc: c_int, // Offset 4, size 4
+    ///   pub sign: mp_sign, // Offset 8, size 4
+    ///   _padding: u32, // Implicit padding to align subsequent 64-bit pointer
+    ///   pub dp: *mut mp_digit, // Offset 16, size 8
+    /// }
+    /// ```
+    /// Layout in 32-bit memory (with classical persistence):
+    /// ```
+    /// pub struct mp_int { // Total size 16
+    ///   pub used: c_int, // Offset 0, size 4
+    ///   pub alloc: c_int, // Offset 4, size 4
+    ///   pub sign: mp_sign, // Offset 8, size 4
+    ///   pub dp: *mut mp_digit, // Offset 12, size 4
+    /// }
+    /// ```
     pub mp_int: mp_int,
     // data follows ..
+    // Array of `mp_digit` with length `alloc`.
+    // Each `mp_digit` has byte size `size_of::<mp_digit>()`.
 }
 
 impl BigInt {
-    pub unsafe fn len(self: *mut Self) -> Bytes<u32> {
-        Bytes(((*self).mp_int.alloc as usize * core::mem::size_of::<mp_digit>()) as u32)
+    pub unsafe fn len(self: *mut Self) -> Bytes {
+        Self::data_length(&(*self).mp_int)
+    }
+
+    pub unsafe fn data_length(mp_int: *const mp_int) -> Bytes {
+        Bytes((*mp_int).alloc as usize * core::mem::size_of::<mp_digit>())
     }
 
     pub unsafe fn payload_addr(self: *mut Self) -> *mut mp_digit {
         self.add(1) as *mut mp_digit // skip closure header
     }
 
+    #[incremental_gc]
     pub unsafe fn forward(self: *mut Self) -> *mut Self {
         (*self).header.forward.as_bigint()
     }
 
+    #[non_incremental_gc]
+    pub unsafe fn forward(self: *mut Self) -> *mut Self {
+        self
+    }
+
     pub unsafe fn from_payload(ptr: *mut mp_digit) -> *mut Self {
-        let bigint = (ptr as *mut u32).sub(size_of::().as_usize()) as *mut BigInt;
+        let bigint = (ptr as *mut usize).sub(size_of::<BigInt>().as_usize()) as *mut BigInt;
         bigint.forward()
     }
 
@@ -851,14 +1101,14 @@ pub struct Some {
 #[repr(C)] // See the note at the beginning of this module
 pub struct Variant {
     pub header: Obj,
-    pub tag: u32,
+    pub tag: usize,
     pub field: Value,
 }
 
 #[repr(C)] // See the note at the beginning of this module
 pub struct Concat {
     pub header: Obj,
-    pub n_bytes: Bytes<u32>,
+    pub n_bytes: Bytes,
     pub text1: Value,
     pub text2: Value,
 }
@@ -874,6 +1124,27 @@ impl Concat {
 }
 
 #[repr(C)] // See the note at the beginning of this module
+#[classical_persistence]
+pub struct Null {
+    pub header: Obj,
+}
+
+#[repr(C)] // See the note at the beginning of this module
+#[enhanced_orthogonal_persistence]
+pub struct Bits64 {
+    pub header: Obj,
+    pub bits: u64,
+}
+
+#[enhanced_orthogonal_persistence]
+impl Bits64 {
+    pub fn bits(&self) -> u64 {
+        self.bits
+    }
+}
+
+#[repr(C)] // See the note at the beginning of this module
+#[classical_persistence]
 pub struct Bits64 {
     pub header: Obj,
     // We have two 32-bit fields instead of one 64-bit to avoid aligning the fields on 64-bit
@@ -882,6 +1153,7 @@ pub struct Bits64 {
     bits_hi: u32,
 }
 
+#[classical_persistence]
 impl Bits64 {
     pub fn bits(&self) -> u64 {
         (u64::from(self.bits_hi) << 32) | u64::from(self.bits_lo)
@@ -889,6 +1161,7 @@ impl Bits64 {
 }
 
 #[repr(C)] // See the note at the beginning of this module
+#[classical_persistence]
 pub struct Bits32 {
     pub header: Obj,
     pub bits: u32,
@@ -904,12 +1177,12 @@ pub struct OneWordFiller {
 #[repr(C)] // See the note at the beginning of this module
 pub struct FreeSpace {
     pub tag: Tag,
-    pub words: Words<u32>,
+    pub words: Words,
 }
 
 impl FreeSpace {
     /// Size of the free space (includes object header)
-    pub unsafe fn size(self: *mut Self) -> Words<u32> {
+    pub unsafe fn size(self: *mut Self) -> Words {
         (*self).words + size_of::()
     }
 }
@@ -918,7 +1191,7 @@ impl FreeSpace {
 /// Handles both objects with header and forwarding pointer
 /// and special blocks such as `OneWordFiller`, `FwdPtr`, and `FreeSpace`
 /// that do not have a forwarding pointer.
-pub(crate) unsafe fn block_size(address: usize) -> Words<u32> {
+pub(crate) unsafe fn block_size(address: usize) -> Words {
     let tag = *(address as *mut Tag);
     match tag {
         TAG_OBJECT => {
@@ -958,8 +1231,6 @@ pub(crate) unsafe fn block_size(address: usize) -> Words {
             rts_trap_with("object_size: forwarding pointer");
         }
 
-        TAG_BITS32_U | TAG_BITS32_S | TAG_BITS32_F => size_of::<Bits32>(),
-
         TAG_BIGINT => {
             let bigint = address as *mut BigInt;
             size_of::<BigInt>() + bigint.len().to_words()
@@ -976,6 +1247,12 @@ pub(crate) unsafe fn block_size(address: usize) -> Words {
 
         TAG_REGION => size_of::<Region>(),
 
+        #[cfg(not(feature = "enhanced_orthogonal_persistence"))]
+        TAG_BITS32_U | TAG_BITS32_S | TAG_BITS32_F => size_of::<Bits32>(),
+
+        #[cfg(not(feature = "enhanced_orthogonal_persistence"))]
+        TAG_NULL => size_of::<Null>(),
+
         _ => {
             rts_trap_with("object_size: invalid object tag");
         }
diff --git a/rts/motoko-rts/src/utf8.rs b/rts/motoko-rts/src/utf8.rs
index 913206194e6..3cf43ce85ab 100644
--- a/rts/motoko-rts/src/utf8.rs
+++ b/rts/motoko-rts/src/utf8.rs
@@ -1,6 +1,8 @@
+use crate::libc_declarations::c_char;
+
 /// Panics if the string is not valid UTF-8
 #[no_mangle]
-pub(crate) unsafe extern "C" fn utf8_validate(str: *const libc::c_char, len: u32) {
+pub(crate) unsafe extern "C" fn utf8_validate(str: *const c_char, len: usize) {
     if !utf8_valid(str, len) {
         crate::rts_trap_with("utf8_validate: string is not UTF-8");
     }
@@ -8,6 +10,6 @@ pub(crate) unsafe extern "C" fn utf8_validate(str: *const libc::c_char, len: u32
 
 /// Returns whether the string is valid UTF-8
 #[no_mangle]
-pub unsafe extern "C" fn utf8_valid(str: *const libc::c_char, len: u32) -> bool {
-    core::str::from_utf8(core::slice::from_raw_parts(str as *const _, len as usize)).is_ok()
+pub unsafe extern "C" fn utf8_valid(str: *const c_char, len: usize) -> bool {
+    core::str::from_utf8(core::slice::from_raw_parts(str as *const _, len)).is_ok()
 }
diff --git a/rts/motoko-rts/src/visitor.rs b/rts/motoko-rts/src/visitor.rs
index 44de9d1490a..4773f5fdc08 100644
--- a/rts/motoko-rts/src/visitor.rs
+++ b/rts/motoko-rts/src/visitor.rs
@@ -1,4 +1,11 @@
-use crate::rts_trap_with;
+#[enhanced_orthogonal_persistence]
+pub mod enhanced;
+
+#[classical_persistence]
+pub mod classical;
+
+use motoko_rts_macros::{classical_persistence, enhanced_orthogonal_persistence};
+
 use crate::types::*;
 
 /// A visitor that passes field addresses of fields with pointers to dynamic heap to the given
@@ -9,137 +16,49 @@ use crate::types::*;
 /// * `ctx`: any context passed to the `visit_*` callbacks
 /// * `obj`: the heap object to be visited (note: its heap tag may be invalid)
 /// * `tag`: the heap object's logical tag (or start of array object's suffix slice)
+/// * `_heap_base`: start address of the dynamic heap, only used with classical persistence.
 /// * `visit_ptr_field`: callback for individual fields
 /// * `visit_field_range`: callback for determining the suffix slice
 ///   Arguments:
 ///   * `&mut C`: passed context
-///   * `u32`: start index of array suffix slice being visited
+///   * `usize`: start index of array suffix slice being visited
 ///   * `*mut Array`: home object of the slice (its heap tag may be invalid)
 ///   Returns:
-///   * `u32`: start of the suffix slice of fields not to be passed to `visit_ptr_field`;
+///   * `usize`: start of the suffix slice of fields not to be passed to `visit_ptr_field`;
 ///            it is the callback's responsibility to deal with the spanned slice
-
+#[enhanced_orthogonal_persistence]
 pub unsafe fn visit_pointer_fields<C, F, G>(
     ctx: &mut C,
     obj: *mut Obj,
     tag: Tag,
+    _heap_base: usize,
     visit_ptr_field: F,
     visit_field_range: G,
 ) where
     F: Fn(&mut C, *mut Value),
-    G: Fn(&mut C, u32, *mut Array) -> u32,
+    G: Fn(&mut C, usize, *mut Array) -> usize,
 {
-    match tag {
-        TAG_OBJECT => {
-            let obj = obj as *mut Object;
-            debug_assert!(is_non_null_pointer_field(obj.hash_blob_addr()));
-            visit_ptr_field(ctx, obj.hash_blob_addr());
-            let obj_payload = obj.payload_addr();
-            for i in 0..obj.size() {
-                let field_addr = obj_payload.add(i as usize);
-                if is_non_null_pointer_field(field_addr) {
-                    visit_ptr_field(ctx, obj_payload.add(i as usize));
-                }
-            }
-        }
-
-        TAG_ARRAY_I | TAG_ARRAY_M | TAG_ARRAY_T | TAG_ARRAY_S | TAG_ARRAY_SLICE_MIN.. => {
-            let (_, slice_start) = slice_start(tag);
-            let array = obj as *mut Array;
-            debug_assert!(slice_start <= array.len());
-            let array_payload = array.payload_addr();
-            let stop = visit_field_range(ctx, slice_start, array);
-            debug_assert!(stop <= array.len());
-            for i in slice_start..stop {
-                let field_addr = array_payload.add(i as usize);
-                if is_non_null_pointer_field(field_addr) {
-                    visit_ptr_field(ctx, field_addr);
-                }
-            }
-        }
-
-        TAG_MUTBOX => {
-            let mutbox = obj as *mut MutBox;
-            let field_addr = &mut (*mutbox).field;
-            if is_non_null_pointer_field(field_addr) {
-                visit_ptr_field(ctx, field_addr);
-            }
-        }
-
-        TAG_CLOSURE => {
-            let closure = obj as *mut Closure;
-            let closure_payload = closure.payload_addr();
-            for i in 0..closure.size() {
-                let field_addr = closure_payload.add(i as usize);
-                if is_non_null_pointer_field(field_addr) {
-                    visit_ptr_field(ctx, field_addr);
-                }
-            }
-        }
-
-        TAG_SOME => {
-            let some = obj as *mut Some;
-            let field_addr = &mut (*some).field;
-            if is_non_null_pointer_field(field_addr) {
-                visit_ptr_field(ctx, field_addr);
-            }
-        }
-
-        TAG_VARIANT => {
-            let variant = obj as *mut Variant;
-            let field_addr = &mut (*variant).field;
-            if is_non_null_pointer_field(field_addr) {
-                visit_ptr_field(ctx, field_addr);
-            }
-        }
-
-        TAG_REGION => {
-            let region = obj as *mut Region;
-            let field_addr = &mut (*region).vec_pages;
-            if is_non_null_pointer_field(field_addr) {
-                visit_ptr_field(ctx, field_addr);
-            }
-        }
-
-        TAG_CONCAT => {
-            let concat = obj as *mut Concat;
-            let field1_addr = &mut (*concat).text1;
-            if is_non_null_pointer_field(field1_addr) {
-                visit_ptr_field(ctx, field1_addr);
-            }
-            let field2_addr = &mut (*concat).text2;
-            if is_non_null_pointer_field(field2_addr) {
-                visit_ptr_field(ctx, field2_addr);
-            }
-        }
-
-        TAG_BITS32_U | TAG_BITS32_S | TAG_BITS32_F | TAG_BITS64_U | TAG_BITS64_S | TAG_BITS64_F
-        | TAG_BLOB_B | TAG_BLOB_T | TAG_BLOB_P | TAG_BLOB_A | TAG_BIGINT => {
-            // These don't have pointers, skip
-        }
-
-        TAG_FWD_PTR | TAG_ONE_WORD_FILLER | TAG_FREE_SPACE | _ => {
-            rts_trap_with("invalid object tag in visit_pointer_fields");
-        }
-    }
+    self::enhanced::visit_pointer_fields(ctx, obj, tag, visit_ptr_field, visit_field_range);
 }
 
-// Temporary function can be later removed.
-pub unsafe fn is_non_null_pointer_field(field_addr: *mut Value) -> bool {
-    let field_value = *field_addr;
-    check_field_value(field_value);
-    field_value.is_non_null_ptr()
-}
-
-// Temporary check, can be later removed.
-#[cfg(feature = "ic")]
-fn check_field_value(value: Value) {
-    debug_assert!(
-        value.is_scalar()
-            || value.get_ptr() >= crate::persistence::HEAP_START
-            || value == NULL_POINTER
+#[classical_persistence]
+pub unsafe fn visit_pointer_fields<C, F, G>(
+    ctx: &mut C,
+    obj: *mut Obj,
+    tag: Tag,
+    heap_base: usize,
+    visit_ptr_field: F,
+    visit_field_range: G,
+) where
+    F: Fn(&mut C, *mut Value),
+    G: Fn(&mut C, usize, *mut Array) -> usize,
+{
+    self::classical::visit_pointer_fields(
+        ctx,
+        obj,
+        tag,
+        heap_base,
+        visit_ptr_field,
+        visit_field_range,
     );
 }
-
-#[cfg(not(feature = "ic"))]
-fn check_field_value(_value: Value) {}
diff --git a/rts/motoko-rts/src/visitor/classical.rs b/rts/motoko-rts/src/visitor/classical.rs
new file mode 100644
index 00000000000..c2e7a95ab54
--- /dev/null
+++ b/rts/motoko-rts/src/visitor/classical.rs
@@ -0,0 +1,134 @@
+use crate::rts_trap_with;
+use crate::types::*;
+
+/// A visitor that passes field addresses of fields with pointers to dynamic heap to the given
+/// callback
+///
+/// Arguments:
+///
+/// * `ctx`: any context passed to the `visit_*` callbacks
+/// * `obj`: the heap object to be visited (note: its heap tag may be invalid)
+/// * `tag`: the heap object's logical tag (or start of array object's suffix slice)
+/// * `heap_base`: start address of the dynamic heap.
+/// * `visit_ptr_field`: callback for individual fields
+/// * `visit_field_range`: callback for determining the suffix slice
+///   Arguments:
+///   * `&mut C`: passed context
+///   * `usize`: start index of array suffix slice being visited
+///   * `*mut Array`: home object of the slice (its heap tag may be invalid)
+///   Returns:
+///   * `usize`: start of the suffix slice of fields not to be passed to `visit_ptr_field`;
+///            it is the callback's responsibility to deal with the spanned slice
+pub unsafe fn visit_pointer_fields<C, F, G>(
+    ctx: &mut C,
+    obj: *mut Obj,
+    tag: Tag,
+    heap_base: usize,
+    visit_ptr_field: F,
+    visit_field_range: G,
+) where
+    F: Fn(&mut C, *mut Value),
+    G: Fn(&mut C, usize, *mut Array) -> usize,
+{
+    match tag {
+        TAG_OBJECT => {
+            let obj = obj as *mut Object;
+            let obj_payload = obj.payload_addr();
+            for i in 0..obj.size() {
+                let field_addr = obj_payload.add(i);
+                if pointer_to_dynamic_heap(field_addr, heap_base) {
+                    visit_ptr_field(ctx, obj_payload.add(i));
+                }
+            }
+        }
+
+        TAG_ARRAY_I | TAG_ARRAY_M | TAG_ARRAY_T | TAG_ARRAY_S | TAG_ARRAY_SLICE_MIN.. => {
+            let (_, slice_start) = slice_start(tag);
+            let array = obj as *mut Array;
+            debug_assert!(slice_start <= array.len());
+            let array_payload = array.payload_addr();
+            let stop = visit_field_range(ctx, slice_start, array);
+            debug_assert!(stop <= array.len());
+            for i in slice_start..stop {
+                let field_addr = array_payload.add(i);
+                if pointer_to_dynamic_heap(field_addr, heap_base) {
+                    visit_ptr_field(ctx, field_addr);
+                }
+            }
+        }
+
+        TAG_MUTBOX => {
+            let mutbox = obj as *mut MutBox;
+            let field_addr = &mut (*mutbox).field;
+            if pointer_to_dynamic_heap(field_addr, heap_base) {
+                visit_ptr_field(ctx, field_addr);
+            }
+        }
+
+        TAG_CLOSURE => {
+            let closure = obj as *mut Closure;
+            let closure_payload = closure.payload_addr();
+            for i in 0..closure.size() {
+                let field_addr = closure_payload.add(i);
+                if pointer_to_dynamic_heap(field_addr, heap_base) {
+                    visit_ptr_field(ctx, field_addr);
+                }
+            }
+        }
+
+        TAG_SOME => {
+            let some = obj as *mut Some;
+            let field_addr = &mut (*some).field;
+            if pointer_to_dynamic_heap(field_addr, heap_base) {
+                visit_ptr_field(ctx, field_addr);
+            }
+        }
+
+        TAG_VARIANT => {
+            let variant = obj as *mut Variant;
+            let field_addr = &mut (*variant).field;
+            if pointer_to_dynamic_heap(field_addr, heap_base) {
+                visit_ptr_field(ctx, field_addr);
+            }
+        }
+
+        TAG_REGION => {
+            let region = obj as *mut Region;
+            let field_addr = &mut (*region).vec_pages;
+            if pointer_to_dynamic_heap(field_addr, heap_base) {
+                visit_ptr_field(ctx, field_addr);
+            }
+        }
+
+        TAG_CONCAT => {
+            let concat = obj as *mut Concat;
+            let field1_addr = &mut (*concat).text1;
+            if pointer_to_dynamic_heap(field1_addr, heap_base) {
+                visit_ptr_field(ctx, field1_addr);
+            }
+            let field2_addr = &mut (*concat).text2;
+            if pointer_to_dynamic_heap(field2_addr, heap_base) {
+                visit_ptr_field(ctx, field2_addr);
+            }
+        }
+
+        TAG_BITS64_U | TAG_BITS64_S | TAG_BITS64_F | TAG_BITS32_U | TAG_BITS32_S | TAG_BITS32_F
+        | TAG_BLOB_B | TAG_BLOB_T | TAG_BLOB_P | TAG_BLOB_A | TAG_BIGINT => {
+            // These don't have pointers, skip
+        }
+
+        TAG_NULL => {
+            rts_trap_with("encountered NULL object tag in visit_pointer_fields");
+        }
+
+        TAG_FWD_PTR | TAG_ONE_WORD_FILLER | TAG_FREE_SPACE | _ => {
+            rts_trap_with("invalid object tag in visit_pointer_fields");
+        }
+    }
+}
+
+pub unsafe fn pointer_to_dynamic_heap(field_addr: *mut Value, heap_base: usize) -> bool {
+    // NB. pattern matching on `field_addr.get()` generates inefficient code
+    let field_value = (*field_addr).get_raw();
+    is_ptr(field_value) && unskew(field_value as usize) >= heap_base
+}
diff --git a/rts/motoko-rts/src/visitor/enhanced.rs b/rts/motoko-rts/src/visitor/enhanced.rs
new file mode 100644
index 00000000000..57220bbd6a1
--- /dev/null
+++ b/rts/motoko-rts/src/visitor/enhanced.rs
@@ -0,0 +1,129 @@
+use crate::rts_trap_with;
+use crate::types::*;
+
+/// A visitor that passes field addresses of fields with pointers to dynamic heap to the given
+/// callback
+///
+/// Arguments:
+///
+/// * `ctx`: any context passed to the `visit_*` callbacks
+/// * `obj`: the heap object to be visited (note: its heap tag may be invalid)
+/// * `tag`: the heap object's logical tag (or start of array object's suffix slice)
+/// * `visit_ptr_field`: callback for individual fields
+/// * `visit_field_range`: callback for determining the suffix slice
+///   Arguments:
+///   * `&mut C`: passed context
+///   * `usize`: start index of array suffix slice being visited
+///   * `*mut Array`: home object of the slice (its heap tag may be invalid)
+///   Returns:
+///   * `usize`: start of the suffix slice of fields not to be passed to `visit_ptr_field`;
+///            it is the callback's responsibility to deal with the spanned slice
+pub unsafe fn visit_pointer_fields<C, F, G>(
+    ctx: &mut C,
+    obj: *mut Obj,
+    tag: Tag,
+    visit_ptr_field: F,
+    visit_field_range: G,
+) where
+    F: Fn(&mut C, *mut Value),
+    G: Fn(&mut C, usize, *mut Array) -> usize,
+{
+    match tag {
+        TAG_OBJECT => {
+            let obj = obj as *mut Object;
+            debug_assert!(is_non_null_pointer_field(obj.hash_blob_addr()));
+            visit_ptr_field(ctx, obj.hash_blob_addr());
+            let obj_payload = obj.payload_addr();
+            for i in 0..obj.size() {
+                let field_addr = obj_payload.add(i);
+                if is_non_null_pointer_field(field_addr) {
+                    visit_ptr_field(ctx, obj_payload.add(i));
+                }
+            }
+        }
+
+        TAG_ARRAY_I | TAG_ARRAY_M | TAG_ARRAY_T | TAG_ARRAY_S | TAG_ARRAY_SLICE_MIN.. => {
+            let (_, slice_start) = slice_start(tag);
+            let array = obj as *mut Array;
+            debug_assert!(slice_start <= array.len());
+            let array_payload = array.payload_addr();
+            let stop = visit_field_range(ctx, slice_start, array);
+            debug_assert!(stop <= array.len());
+            for i in slice_start..stop {
+                let field_addr = array_payload.add(i);
+                if is_non_null_pointer_field(field_addr) {
+                    visit_ptr_field(ctx, field_addr);
+                }
+            }
+        }
+
+        TAG_MUTBOX => {
+            let mutbox = obj as *mut MutBox;
+            let field_addr = &mut (*mutbox).field;
+            if is_non_null_pointer_field(field_addr) {
+                visit_ptr_field(ctx, field_addr);
+            }
+        }
+
+        TAG_CLOSURE => {
+            let closure = obj as *mut Closure;
+            let closure_payload = closure.payload_addr();
+            for i in 0..closure.size() {
+                let field_addr = closure_payload.add(i);
+                if is_non_null_pointer_field(field_addr) {
+                    visit_ptr_field(ctx, field_addr);
+                }
+            }
+        }
+
+        TAG_SOME => {
+            let some = obj as *mut Some;
+            let field_addr = &mut (*some).field;
+            if is_non_null_pointer_field(field_addr) {
+                visit_ptr_field(ctx, field_addr);
+            }
+        }
+
+        TAG_VARIANT => {
+            let variant = obj as *mut Variant;
+            let field_addr = &mut (*variant).field;
+            if is_non_null_pointer_field(field_addr) {
+                visit_ptr_field(ctx, field_addr);
+            }
+        }
+
+        TAG_REGION => {
+            let region = obj as *mut Region;
+            let field_addr = &mut (*region).vec_pages;
+            if is_non_null_pointer_field(field_addr) {
+                visit_ptr_field(ctx, field_addr);
+            }
+        }
+
+        TAG_CONCAT => {
+            let concat = obj as *mut Concat;
+            let field1_addr = &mut (*concat).text1;
+            if is_non_null_pointer_field(field1_addr) {
+                visit_ptr_field(ctx, field1_addr);
+            }
+            let field2_addr = &mut (*concat).text2;
+            if is_non_null_pointer_field(field2_addr) {
+                visit_ptr_field(ctx, field2_addr);
+            }
+        }
+
+        TAG_BITS64_U | TAG_BITS64_S | TAG_BITS64_F | TAG_BLOB_B | TAG_BLOB_T | TAG_BLOB_P
+        | TAG_BLOB_A | TAG_BIGINT => {
+            // These don't have pointers, skip
+        }
+
+        TAG_FWD_PTR | TAG_ONE_WORD_FILLER | TAG_FREE_SPACE | _ => {
+            rts_trap_with("invalid object tag in visit_pointer_fields");
+        }
+    }
+}
+
+pub unsafe fn is_non_null_pointer_field(field_addr: *mut Value) -> bool {
+    let field_value = *field_addr;
+    field_value.is_non_null_ptr()
+}
diff --git a/src/codegen/compile.ml b/src/codegen/compile_classical.ml
similarity index 87%
rename from src/codegen/compile.ml
rename to src/codegen/compile_classical.ml
index 809404ef7fe..18448230778 100644
--- a/src/codegen/compile.ml
+++ b/src/codegen/compile_classical.ml
@@ -16,12 +16,18 @@ open Mo_types
 open Mo_config
 
 open Wasm_exts.Ast
-open Wasm.Types
 open Source
-
 (* Re-shadow Source.(@@), to get Stdlib.(@@) *)
 let (@@) = Stdlib.(@@)
 
+module Wasm = struct
+include Wasm
+  module Types = Wasm_exts.Types
+  module Values = Wasm_exts.Values
+end
+
+open Wasm.Types
+
 module G = InstrList
 let (^^) = G.(^^) (* is this how we import a single operator from a module that we otherwise use qualified? *)
 
@@ -179,7 +185,7 @@ module StaticBytes = struct
 
   type t_ =
     | I32 of int32
-    (* | I64 of int64 *)
+    | I64 of int64
     | Seq of t
     | Bytes of string
 
@@ -189,7 +195,7 @@ module StaticBytes = struct
 
   let rec add : Buffer.t -> t_ -> unit = fun buf -> function
     | I32 i -> Buffer.add_int32_le buf i
-    (* | I64 i -> Buffer.add_int64_le buf i *)
+    | I64 i -> Buffer.add_int64_le buf i
     | Seq xs -> List.iter (add buf) xs
     | Bytes b -> Buffer.add_string buf b
 
@@ -230,7 +236,7 @@ module Const = struct
     | Blob of string
     | Null
 
-  let lit_eq l1 l2 = match l1, l2 with
+  let lit_eq = function
     | Vanilla i, Vanilla j -> i = j
     | BigInt i, BigInt j -> Big_int.eq_big_int i j
     | Word32 (tyi, i), Word32 (tyj, j) -> tyi = tyj && i = j
@@ -273,34 +279,23 @@ module Const = struct
   *)
 
   type v =
-    | Fun of int32 * (unit -> int32) * fun_rhs (* id is used for equality check, function pointer calculated upon first use *)
+    | Fun of (unit -> int32) * fun_rhs (* function pointer calculated upon first use *)
     | Message of int32 (* anonymous message, only temporary *)
-    | Obj of (string * v) list
+    | Obj of (string * t) list
     | Unit
-    | Array of v list (* immutable arrays *)
-    | Tuple of v list (* non-nullary tuples *)
-    | Tag of (string * v)
-    | Opt of v
+    | Array of t list (* immutable arrays *)
+    | Tuple of t list (* non-nullary tuples *)
+    | Tag of (string * t)
+    | Opt of t
     | Lit of lit
 
-  let rec eq v1 v2 = match v1, v2 with
-    | Fun (id1, _, _), Fun (id2, _, _) -> id1 = id2
-    | Message fi1, Message fi2 -> fi1 = fi2
-    | Obj fields1, Obj fields2 ->
-      let equal_fields (name1, field_value1) (name2, field_value2) = (name1 = name2) && (eq field_value1 field_value2) in
-      List.for_all2 equal_fields fields1 fields2
-    | Unit, Unit -> true
-    | Array elements1, Array elements2 ->
-      List.for_all2 eq elements1 elements2
-    | Tuple elements1, Tuple elements2 ->
-      List.for_all2 eq elements1 elements2
-    | Tag (name1, tag_value1), Tag (name2, tag_value2) ->
-      (name1 = name2) && (eq tag_value1 tag_value2)
-    | Opt opt_value1, Opt opt_value2 -> eq opt_value1 opt_value2
-    | Lit l1, Lit l2 -> lit_eq l1 l2
-    | Fun _, _ | Message _, _ | Obj _, _ | Unit, _ 
-    | Array _, _ | Tuple _, _ | Tag _, _ | Opt _, _ 
-    | Lit _, _ -> false
+  (* A constant known value together with a vanilla pointer.
+     Typically a static memory location, could be an unboxed scalar.
+     Filled on demand.
+   *)
+  and t = (int32 Lib.Promise.t * v)
+
+  let t_of_v v = (Lib.Promise.make (), v)
 
 end (* Const *)
 
@@ -320,7 +315,7 @@ module SR = struct
     | UnboxedWord32 of Type.prim
     | UnboxedFloat64
     | Unreachable
-    | Const of Const.v
+    | Const of Const.t
 
   let unit = UnboxedTuple 0
 
@@ -329,9 +324,16 @@ module SR = struct
   (* Because t contains Const.t, and that contains Const.v, and that contains
      Const.lit, and that contains Big_int, we cannot just use normal `=`. So we
      have to write our own equality.
+
+     This equalty is, I believe, used when joining branches. So for Const, we
+     just compare the promises, and do not descend into the Const.v. This is
+     conservative; the only downside is that if a branch returns different
+     Const.t with (semantically) the same Const.v we do not propagate that as
+     Const, but materialize before the branch.
+     Which is not really expected or important.
   *)
   let eq (t1 : t) (t2 : t) = match t1, t2 with
-    | Const c1, Const c2 -> Const.eq c1 c2
+    | Const (p1, _), Const (p2, _) -> p1 == p2
     | _ -> t1 = t2
 
   let to_var_type : t -> value_type = function
@@ -401,23 +403,7 @@ module E = struct
   type local_names = (int32 * string) list (* For the debug section: Names of locals *)
   type func_with_names = func * local_names
   type lazy_function = (int32, func_with_names) Lib.AllocOnUse.t
-  type type_descriptor = {
-    candid_data_segment : int32;
-    type_offsets_segment : int32;
-    idl_types_segment : int32;
-  }
-  (* Object allocation code. *)
-  type object_allocation = t -> G.t
-  (* Pool of shared objects.
-     Alllocated in the dynamic heap on program initialization/upgrade.
-     Identified by the index position in this list and accessed via the runtime system.
-     Registered as GC root set and replaced on program upgrade. 
-  *)
-  and object_pool = {
-    objects: object_allocation list ref;
-    frozen: bool ref;
-  }
-  and t = {
+  type t = {
     (* Global fields *)
     (* Static *)
     mode : Flags.compile_mode;
@@ -441,9 +427,20 @@ module E = struct
     named_imports : int32 NameEnv.t ref;
     built_in_funcs : lazy_function NameEnv.t ref;
     static_strings : int32 StringEnv.t ref;
-    data_segments : string list ref; (* Passive data segments *)
-    object_pool : object_pool;
-      
+      (* Pool for shared static objects. Their lookup needs to be specifically
+         handled by using the tag and the payload without the forwarding pointer.
+         This is because the forwarding pointer depends on the allocation adddress.
+         The lookup is different to `static_string` that has no such
+         allocation-dependent content and can thus be immediately looked up by
+         the string value. *)
+    object_pool : int32 StringEnv.t ref;
+    end_of_static_memory : int32 ref; (* End of statically allocated memory *)
+    static_memory : (int32 * string) list ref; (* Content of static memory *)
+    static_memory_frozen : bool ref;
+      (* Sanity check: Nothing should bump end_of_static_memory once it has been read *)
+    static_roots : int32 list ref;
+      (* GC roots in static memory. (Everything that may be mutable.) *)
+
     (* Types accumulated in global typtbl (for candid subtype checks)
        See Note [Candid subtype checks]
     *)
@@ -469,21 +466,11 @@ module E = struct
 
     (* requires stable memory (and emulation on wasm targets) *)
     requires_stable_memory : bool ref;
-
-    (* Type descriptor of current program version, created on `conclude_module`. *)
-    global_type_descriptor : type_descriptor option ref;
-
-    (* Counter for deriving a unique id per constant function. *)
-    constant_functions : int32 ref;
   }
 
-  (* Compile-time-known value, either a plain vanilla constant or a shared object. *)
-  type shared_value =
-  | Vanilla of int32
-  | SharedObject of int32 (* index in object pool *)
-  
+
   (* The initial global environment *)
-  let mk_global mode rts trap_with : t = {
+  let mk_global mode rts trap_with dyn_mem : t = {
     mode;
     rts;
     trap_with;
@@ -499,8 +486,11 @@ module E = struct
     named_imports = ref NameEnv.empty;
     built_in_funcs = ref NameEnv.empty;
     static_strings = ref StringEnv.empty;
-    data_segments = ref [];
-    object_pool = { objects = ref []; frozen = ref false };
+    object_pool = ref StringEnv.empty;
+    end_of_static_memory = ref dyn_mem;
+    static_memory = ref [];
+    static_memory_frozen = ref false;
+    static_roots = ref [];
     typtbl_typs = ref [];
     (* Metadata *)
     args = ref None;
@@ -514,8 +504,6 @@ module E = struct
     local_names = ref [];
     features = ref FeatureSet.empty;
     requires_stable_memory = ref false;
-    global_type_descriptor = ref None;
-    constant_functions = ref 0l;
   }
 
   (* This wraps Mo_types.Hash.hash to also record which labels we have seen,
@@ -621,11 +609,6 @@ module E = struct
   let make_lazy_function env name : lazy_function =
     Lib.AllocOnUse.make (fun () -> reserve_fun env name)
 
-  let get_constant_function_id (env : t) : int32 =
-    let id = !(env.constant_functions) in
-    env.constant_functions := (Int32.add id 1l);
-    id
-
   let lookup_built_in (env : t) name : lazy_function =
     match NameEnv.find_opt name !(env.built_in_funcs) with
     | None ->
@@ -708,10 +691,21 @@ module E = struct
   let then_trap_with env msg = G.if0 (trap_with env msg) G.nop
   let else_trap_with env msg = G.if0 G.nop (trap_with env msg)
 
-  let add_data_segment (env : t) data : int32 =
-    let index = List.length !(env.data_segments) in
-    env.data_segments := !(env.data_segments) @ [ data ];
-    Int32.of_int index
+  let reserve_static_memory (env : t) size : int32 =
+    if !(env.static_memory_frozen) then raise (Invalid_argument "Static memory frozen");
+    let ptr = !(env.end_of_static_memory) in
+    let aligned = Int32.logand (Int32.add size 3l) (Int32.lognot 3l) in
+    env.end_of_static_memory := Int32.add ptr aligned;
+    ptr
+
+  let write_static_memory (env : t) ptr data =
+    env.static_memory := !(env.static_memory) @ [ (ptr, data) ];
+    ()
+
+  let add_mutable_static_bytes (env : t) data : int32 =
+    let ptr = reserve_static_memory env (Int32.of_int (String.length data)) in
+    env.static_memory := !(env.static_memory) @ [ (ptr, data) ];
+    Int32.(add ptr ptr_skew) (* Return a skewed pointer *)
 
   let add_fun_ptr (env : t) fi : int32 =
     match FunEnv.find_opt fi !(env.func_ptrs) with
@@ -731,44 +725,50 @@ module E = struct
   let add_static (env : t) (data : StaticBytes.t) : int32 =
     let b = StaticBytes.as_bytes data in
     match StringEnv.find_opt b !(env.static_strings)  with
-    | Some segment_index -> segment_index
+    | Some ptr -> ptr
     | None ->
-      let segment_index = add_data_segment env b  in
-      env.static_strings := StringEnv.add b segment_index !(env.static_strings);
-      segment_index
-
-  let replace_data_segment (env : t) (segment_index : int32) (data : StaticBytes.t) : int32 =
-    let new_value = StaticBytes.as_bytes data in
-    let segment_index = Int32.to_int segment_index in
-    assert (segment_index < List.length !(env.data_segments));
-    env.data_segments := List.mapi (fun index old_value -> 
-      if index = segment_index then
-        (assert (old_value = "");
-        new_value)
-      else 
-        old_value
-      ) !(env.data_segments);
-    Int32.of_int (String.length new_value)
-
-  let get_data_segments (env : t) =
-    !(env.data_segments)
-
-  let object_pool_add (env : t) (allocation : t -> G.t) : int32 =
-    if !(env.object_pool.frozen) then raise (Invalid_argument "Object pool frozen");
-    let index = List.length !(env.object_pool.objects) in
-    env.object_pool.objects := !(env.object_pool.objects) @ [ allocation ];
-    Int32.of_int index
-
-  let object_pool_size (env : t) : int =
-    List.length !(env.object_pool.objects)
-
-  let iterate_object_pool (env : t) f =
-    G.concat_mapi f !(env.object_pool.objects)
+      let ptr = add_mutable_static_bytes env b  in
+      env.static_strings := StringEnv.add b ptr !(env.static_strings);
+      ptr
+
+  let object_pool_find (env: t) (key: string) : int32 option =
+    StringEnv.find_opt key !(env.object_pool)
+
+  let object_pool_add (env: t) (key: string) (ptr : int32)  : unit =
+    env.object_pool := StringEnv.add key ptr !(env.object_pool);
+    ()
+
+  let add_static_unskewed (env : t) (data : StaticBytes.t) : int32 =
+    Int32.add (add_static env data) ptr_unskew
+
+  let get_end_of_static_memory env : int32 =
+    env.static_memory_frozen := true;
+    !(env.end_of_static_memory)
+
+  let add_static_root (env : t) ptr =
+    env.static_roots := ptr :: !(env.static_roots)
+
+  let get_static_roots (env : t) =
+    !(env.static_roots)
+
+  let get_static_memory env =
+    !(env.static_memory)
+
+  let mem_size env =
+    Int32.(add (div (get_end_of_static_memory env) page_size) 1l)
+
+  let gc_strategy_name gc_strategy = match gc_strategy with
+    | Flags.MarkCompact -> "compacting"
+    | Flags.Copying -> "copying"
+    | Flags.Generational -> "generational"
+    | Flags.Incremental -> "incremental"
+    | Flags.Default -> assert false (* Already resolved in `pipeline.ml` *)
 
   let collect_garbage env force =
-    let name = "incremental_gc" in
+    (* GC function name = "schedule_"? ("compacting" | "copying" | "generational" | "incremental") "_gc" *)
+    let name = gc_strategy_name !Flags.gc_strategy in
     let gc_fn = if force || !Flags.force_gc then name else "schedule_" ^ name in
-    call_import env "rts" gc_fn
+    call_import env "rts" (gc_fn ^ "_gc")
 
   (* See Note [Candid subtype checks] *)
   (* NB: we don't bother detecting duplicate registrations here because the code sharing machinery
@@ -798,12 +798,12 @@ module E = struct
   let requires_stable_memory (env : t) =
     !(env.requires_stable_memory)
 
-  let get_memories (env : t) initial_memory_pages =
-    nr {mtype = MemoryType {min = initial_memory_pages; max = None}}
+  let get_memories (env : t) =
+    nr {mtype = MemoryType ({min = Int64.of_int32 (mem_size env); max = None}, I32IndexType)}
     ::
     match mode env with
     | Flags.WASIMode | Flags.WasmMode when !(env.requires_stable_memory) ->
-      [ nr {mtype = MemoryType {min = Int32.zero; max = None}} ]
+      [ nr {mtype = MemoryType ({min = Int64.zero; max = None}, I32IndexType)} ]
     | _ -> []
 end
 
@@ -863,7 +863,18 @@ let compile_xor64_const = function
 let compile_eq64_const i =
   compile_const_64 i ^^
   G.i (Compare (Wasm.Values.I64 I64Op.Eq))
-  
+
+(* more random utilities *)
+
+let bytes_of_int32 (i : int32) : string =
+  let b = Buffer.create 4 in
+  let i = Int32.to_int i in
+  Buffer.add_char b (Char.chr (i land 0xff));
+  Buffer.add_char b (Char.chr ((i lsr 8) land 0xff));
+  Buffer.add_char b (Char.chr ((i lsr 16) land 0xff));
+  Buffer.add_char b (Char.chr ((i lsr 24) land 0xff));
+  Buffer.contents b
+
 (* A common variant of todo *)
 
 let todo_trap env fn se = todo fn se (E.trap_with env ("TODO: " ^ fn))
@@ -921,22 +932,16 @@ let from_0_to_n env mk_body = from_m_to_n env 0l mk_body
 (* Pointer reference and dereference  *)
 
 let load_unskewed_ptr : G.t =
-  G.i (Load {ty = I32Type; align = 2; offset = 0l; sz = None})
+  G.i (Load {ty = I32Type; align = 2; offset = 0L; sz = None})
 
 let store_unskewed_ptr : G.t =
-  G.i (Store {ty = I32Type; align = 2; offset = 0l; sz = None})
+  G.i (Store {ty = I32Type; align = 2; offset = 0L; sz = None})
 
-let load_unskewed_ptr64 : G.t =
-  G.i (Load {ty = I64Type; align = 2; offset = 0l; sz = None})
-
-let store_unskewed_ptr64 : G.t =
-  G.i (Store {ty = I64Type; align = 2; offset = 0l; sz = None})
-  
 let load_ptr : G.t =
-  G.i (Load {ty = I32Type; align = 2; offset = ptr_unskew; sz = None})
+  G.i (Load {ty = I32Type; align = 2; offset = Int64.of_int32 ptr_unskew; sz = None})
 
 let store_ptr : G.t =
-  G.i (Store {ty = I32Type; align = 2; offset = ptr_unskew; sz = None})
+  G.i (Store {ty = I32Type; align = 2; offset = Int64.of_int32 ptr_unskew; sz = None})
 
 module FakeMultiVal = struct
   (* For some use-cases (e.g. processing the compiler output with analysis
@@ -1067,7 +1072,7 @@ module Func = struct
       g3
       g4
     )
-  let [@warning "-8"] _share_code6 sharing env name (p1, p2, p3, p4, p5, p6) retty mk_body =
+  let [@warning "-8"] share_code6 sharing env name (p1, p2, p3, p4, p5, p6) retty mk_body =
     share_code sharing env name [p1; p2; p3; p4; p5; p6] retty (fun env [g1; g2; g3; g4; g5; g6] -> mk_body env
       g1
       g2
@@ -1104,29 +1109,35 @@ module Func = struct
 end (* Func *)
 
 module RTS = struct
-  (* The connection to the C and Rust parts of the RTS *)
-  let system_imports env =
+  let incremental_gc_imports env =
     E.add_func_import env "rts" "initialize_incremental_gc" [] [];
     E.add_func_import env "rts" "schedule_incremental_gc" [] [];
     E.add_func_import env "rts" "incremental_gc" [] [];
     E.add_func_import env "rts" "write_with_barrier" [I32Type; I32Type] [];
     E.add_func_import env "rts" "allocation_barrier" [I32Type] [I32Type];
+    E.add_func_import env "rts" "stop_gc_on_upgrade" [] [];
     E.add_func_import env "rts" "running_gc" [] [I32Type];
-    E.add_func_import env "rts" "register_stable_type" [I32Type; I32Type] [];
-    E.add_func_import env "rts" "load_stable_actor" [] [I32Type];
-    E.add_func_import env "rts" "save_stable_actor" [I32Type] [];
-    E.add_func_import env "rts" "free_stable_actor" [] [];
-    E.add_func_import env "rts" "contains_field" [I32Type; I32Type] [I32Type];
-    E.add_func_import env "rts" "initialize_static_variables" [I32Type] [];
-    E.add_func_import env "rts" "get_static_variable" [I32Type] [I32Type];
-    E.add_func_import env "rts" "set_static_variable" [I32Type; I32Type] [];
-    E.add_func_import env "rts" "set_upgrade_instructions" [I64Type] [];
-    E.add_func_import env "rts" "get_upgrade_instructions" [] [I64Type];
+    ()
+
+  let non_incremental_gc_imports env =
+    E.add_func_import env "rts" "initialize_copying_gc" [] [];
+    E.add_func_import env "rts" "initialize_compacting_gc" [] [];
+    E.add_func_import env "rts" "initialize_generational_gc" [] [];
+    E.add_func_import env "rts" "schedule_copying_gc" [] [];
+    E.add_func_import env "rts" "schedule_compacting_gc" [] [];
+    E.add_func_import env "rts" "schedule_generational_gc" [] [];
+    E.add_func_import env "rts" "copying_gc" [] [];
+    E.add_func_import env "rts" "compacting_gc" [] [];
+    E.add_func_import env "rts" "generational_gc" [] [];
+    E.add_func_import env "rts" "post_write_barrier" [I32Type] [];
+    ()
+
+  (* The connection to the C and Rust parts of the RTS *)
+  let system_imports env =
     E.add_func_import env "rts" "memcpy" [I32Type; I32Type; I32Type] [I32Type]; (* standard libc memcpy *)
     E.add_func_import env "rts" "memcmp" [I32Type; I32Type; I32Type] [I32Type];
     E.add_func_import env "rts" "version" [] [I32Type];
     E.add_func_import env "rts" "parse_idl_header" [I32Type; I32Type; I32Type; I32Type; I32Type] [];
-    E.add_func_import env "rts" "idl_alloc_typtbl" [I32Type; I32Type; I32Type; I32Type; I32Type] [];
     E.add_func_import env "rts" "idl_sub_buf_words" [I32Type; I32Type] [I32Type];
     E.add_func_import env "rts" "idl_sub_buf_init" [I32Type; I32Type; I32Type] [];
     E.add_func_import env "rts" "idl_sub"
@@ -1164,10 +1175,12 @@ module RTS = struct
     E.add_func_import env "rts" "bigint_abs" [I32Type] [I32Type];
     E.add_func_import env "rts" "bigint_leb128_size" [I32Type] [I32Type];
     E.add_func_import env "rts" "bigint_leb128_encode" [I32Type; I32Type] [];
+    E.add_func_import env "rts" "bigint_leb128_stream_encode" [I32Type; I32Type] [];
     E.add_func_import env "rts" "bigint_leb128_decode" [I32Type] [I32Type];
     E.add_func_import env "rts" "bigint_leb128_decode_word64" [I64Type; I64Type; I32Type] [I32Type];
     E.add_func_import env "rts" "bigint_sleb128_size" [I32Type] [I32Type];
     E.add_func_import env "rts" "bigint_sleb128_encode" [I32Type; I32Type] [];
+    E.add_func_import env "rts" "bigint_sleb128_stream_encode" [I32Type; I32Type] [];
     E.add_func_import env "rts" "bigint_sleb128_decode" [I32Type] [I32Type];
     E.add_func_import env "rts" "bigint_sleb128_decode_word64" [I64Type; I64Type; I32Type] [I32Type];
     E.add_func_import env "rts" "leb128_encode" [I32Type; I32Type] [];
@@ -1249,6 +1262,18 @@ module RTS = struct
     E.add_func_import env "rts" "get_heap_size" [] [I32Type];
     E.add_func_import env "rts" "alloc_blob" [I32Type; I32Type] [I32Type];
     E.add_func_import env "rts" "alloc_array" [I32Type; I32Type] [I32Type];
+    E.add_func_import env "rts" "alloc_stream" [I32Type] [I32Type];
+    E.add_func_import env "rts" "stream_write" [I32Type; I32Type; I32Type] [];
+    E.add_func_import env "rts" "stream_write_byte" [I32Type; I32Type] [];
+    E.add_func_import env "rts" "stream_write_text" [I32Type; I32Type] [];
+    E.add_func_import env "rts" "stream_split" [I32Type] [I32Type];
+    E.add_func_import env "rts" "stream_shutdown" [I32Type] [];
+    E.add_func_import env "rts" "stream_reserve" [I32Type; I32Type] [I32Type];
+    E.add_func_import env "rts" "stream_stable_dest" [I32Type; I64Type; I64Type] [];
+    if !Flags.gc_strategy = Flags.Incremental then
+      incremental_gc_imports env
+    else
+      non_incremental_gc_imports env;
     ()
 
 end (* RTS *)
@@ -1262,7 +1287,9 @@ module GC = struct
 
   let register_globals env =
     E.add_global64 env "__mutator_instructions" Mutable 0L;
-    E.add_global64 env "__collector_instructions" Mutable 0L
+    E.add_global64 env "__collector_instructions" Mutable 0L;
+    if !Flags.gc_strategy <> Flags.Incremental then
+      E.add_global32 env "_HP" Mutable 0l
 
   let get_mutator_instructions env =
     G.i (GlobalGet (nr (E.get_global env "__mutator_instructions")))
@@ -1274,6 +1301,17 @@ module GC = struct
   let set_collector_instructions env =
     G.i (GlobalSet (nr (E.get_global env "__collector_instructions")))
 
+  let get_heap_pointer env =
+    if !Flags.gc_strategy <> Flags.Incremental then
+      G.i (GlobalGet (nr (E.get_global env "_HP")))
+    else
+      assert false
+  let set_heap_pointer env =
+    if !Flags.gc_strategy <> Flags.Incremental then
+      G.i (GlobalSet (nr (E.get_global env "_HP")))
+    else
+      assert false
+
   let record_mutator_instructions env =
     match E.mode env with
     | Flags.(ICMode | RefMode)  ->
@@ -1328,39 +1366,50 @@ module Heap = struct
     compile_unboxed_const n ^^
     E.call_import env "rts" "alloc_words"
 
+  let ensure_allocated env =
+    alloc env 0l ^^ G.i Drop (* dummy allocation, ensures that the page HP points into is backed *)
+
   (* Heap objects *)
 
   (* At this level of abstraction, heap objects are just flat arrays of words *)
 
+  let load_field_unskewed (i : int32) : G.t =
+    let offset = Int32.mul word_size i in
+    G.i (Load {ty = I32Type; align = 2; offset = Int64.of_int32 offset; sz = None})
+
   let load_field (i : int32) : G.t =
     let offset = Int32.(add (mul word_size i) ptr_unskew) in
-    G.i (Load {ty = I32Type; align = 2; offset; sz = None})
+    G.i (Load {ty = I32Type; align = 2; offset = Int64.of_int32 offset; sz = None})
 
   let store_field (i : int32) : G.t =
     let offset = Int32.(add (mul word_size i) ptr_unskew) in
-    G.i (Store {ty = I32Type; align = 2; offset; sz = None})
+    G.i (Store {ty = I32Type; align = 2; offset = Int64.of_int32 offset; sz = None})
 
   (* Although we occasionally want to treat two consecutive
      32 bit fields as one 64 bit number *)
 
-  (* Requires little-endian encoding *)
+  (* Requires little-endian encoding, see also `Stream` in `types.rs` *)
+  let load_field64_unskewed (i : int32) : G.t =
+    let offset = Int32.mul word_size i in
+    G.i (Load {ty = I64Type; align = 2; offset = Int64.of_int32 offset; sz = None})
+
   let load_field64 (i : int32) : G.t =
     let offset = Int32.(add (mul word_size i) ptr_unskew) in
-    G.i (Load {ty = I64Type; align = 2; offset; sz = None})
+    G.i (Load {ty = I64Type; align = 2; offset = Int64.of_int32 offset; sz = None})
 
   let store_field64 (i : int32) : G.t =
     let offset = Int32.(add (mul word_size i) ptr_unskew) in
-    G.i (Store {ty = I64Type; align = 2; offset; sz = None})
+    G.i (Store {ty = I64Type; align = 2; offset = Int64.of_int32 offset; sz = None})
 
   (* Or even as a single 64 bit float *)
 
   let load_field_float64 (i : int32) : G.t =
     let offset = Int32.(add (mul word_size i) ptr_unskew) in
-    G.i (Load {ty = F64Type; align = 2; offset; sz = None})
+    G.i (Load {ty = F64Type; align = 2; offset = Int64.of_int32 offset; sz = None})
 
   let store_field_float64 (i : int32) : G.t =
     let offset = Int32.(add (mul word_size i) ptr_unskew) in
-    G.i (Store {ty = F64Type; align = 2; offset; sz = None})
+    G.i (Store {ty = F64Type; align = 2; offset = Int64.of_int32 offset; sz = None})
 
   (* Convenience functions related to memory *)
   (* Copying bytes (works on unskewed memory addresses) *)
@@ -1381,10 +1430,6 @@ module Heap = struct
   let get_heap_size env =
     E.call_import env "rts" "get_heap_size"
 
-  let get_static_variable env index = 
-    compile_unboxed_const index ^^
-    E.call_import env "rts" "get_static_variable"
-
 end (* Heap *)
 
 module Stack = struct
@@ -1401,10 +1446,12 @@ module Stack = struct
      grows downwards.)
   *)
 
-  (* Predefined constant stack size of 4MB, according to the persistent memory layout. *)
-  let stack_size = 4 * 1024 * 1024
+  let rts_stack_pages () = match !Flags.rts_stack_pages with
+  | None -> assert false (* Already resolved by `pipeline.ml` *)
+  | Some pages -> pages
 
-  let end_ () = Int32.of_int stack_size 
+  let end_ () = 
+    Int32.mul (Int32.of_int (rts_stack_pages ())) page_size
 
   let register_globals env =
     (* stack pointer *)
@@ -1449,14 +1496,14 @@ module Stack = struct
     Func.share_code0 Func.Never env "stack_overflow" [] (fun env ->
       (* read last word of reserved page to force trap *)
       compile_unboxed_const 0xFFFF_FFFCl ^^
-      G.i (Load {ty = I32Type; align = 2; offset = 0l; sz = None}) ^^
+      G.i (Load {ty = I32Type; align = 2; offset = 0L; sz = None}) ^^
       G.i Unreachable
     )
 
   let alloc_words env n =
     let n_bytes = Int32.mul n Heap.word_size in
     (* avoid absurd allocations *)
-    assert (Int32.(to_int n_bytes) < stack_size);
+    assert Int32.(to_int n_bytes < (rts_stack_pages ()) * to_int page_size);
     (* alloc words *)
     get_stack_ptr env ^^
     compile_unboxed_const n_bytes ^^
@@ -1544,7 +1591,7 @@ module Stack = struct
     alloc_words env (Int32.add n 1l) ^^
     (* store the current frame_ptr at offset 0*)
     get_frame_ptr env ^^
-    G.i (Store {ty = I32Type; align = 2; offset = 0l; sz = None}) ^^
+    G.i (Store {ty = I32Type; align = 2; offset = 0L; sz = None}) ^^
     get_stack_ptr env ^^
     (* set_frame_ptr to stack_ptr *)
     set_frame_ptr env ^^
@@ -1557,7 +1604,7 @@ module Stack = struct
     E.else_trap_with env "frame_ptr <> stack_ptr" ^^
     (* restore the saved frame_ptr *)
     get_frame_ptr env ^^
-    G.i (Load {ty = I32Type; align = 2; offset = 0l; sz = None}) ^^
+    G.i (Load {ty = I32Type; align = 2; offset = 0L; sz = None}) ^^
     set_frame_ptr env ^^
     (* free the frame *)
     free_words env (Int32.add n 1l)
@@ -1566,15 +1613,15 @@ module Stack = struct
   let get_local env n =
     let offset = Int32.mul (Int32.add n 1l) Heap.word_size in
     get_frame_ptr env ^^
-      G.i (Load { ty = I32Type; align = 2; offset; sz = None})
+      G.i (Load { ty = I32Type; align = 2; offset = Int64.of_int32 offset; sz = None})
 
   (* read local n of previous frame *)
   let get_prev_local env n =
     let offset = Int32.mul (Int32.add n 1l) Heap.word_size in
     (* indirect through save frame_ptr at offset 0 *)
     get_frame_ptr env ^^
-    G.i (Load { ty = I32Type; align = 2; offset = 0l; sz = None}) ^^
-    G.i (Load { ty = I32Type; align = 2; offset; sz = None})
+    G.i (Load { ty = I32Type; align = 2; offset = 0L; sz = None}) ^^
+    G.i (Load { ty = I32Type; align = 2; offset = Int64.of_int32 offset; sz = None})
 
   (* set local n of current frame *)
   let set_local env n =
@@ -1583,7 +1630,7 @@ module Stack = struct
       (fun env get_val ->
          get_frame_ptr env ^^
          get_val ^^
-         G.i (Store { ty = I32Type; align = 2; offset; sz = None}))
+         G.i (Store { ty = I32Type; align = 2; offset = Int64.of_int32 offset; sz = None}))
 
 end (* Stack *)
 
@@ -1934,6 +1981,7 @@ end (* BitTagged *)
 
 module Tagged = struct
   (* Tagged objects all have an object header consisting of a tag and a forwarding pointer.
+     The forwarding pointer is only reserved if compiled for the incremental GC.
      The tag is to describe their runtime type and serves to traverse the heap
      (serialization, GC), but also for objectification of arrays.
 
@@ -1946,9 +1994,15 @@ module Tagged = struct
      │ tag  │ fwd ptr │ ...
      └──────┴─────────┴──
 
+     The copying GC requires that all tagged objects in the dynamic heap space have at least
+     two words in order to replace them by `Indirection`. This condition is except for `Null`
+     that only lives in static heap space and is therefore not replaced by `Indirection` during
+     copying GC.
+
      Attention: This mapping is duplicated in these places
        * here
        * motoko-rts/src/types.rs
+       * motoko-rts/src/stream.rs
        * motoko-rts/src/text.rs
        * motoko-rts/src/memory.rs
        * motoko-rts/src/bigint.rs
@@ -1986,6 +2040,7 @@ module Tagged = struct
     | Bits32 of bits_sort (* Contains a 32 bit value *)
     | BigInt
     | Concat (* String concatenation, used by rts/text.c *)
+    | Null (* For opt. Static singleton! *)
     | OneWordFiller (* Only used by the RTS *)
     | FreeSpace (* Only used by the RTS *)
     | Region
@@ -1998,6 +2053,8 @@ module Tagged = struct
 
      (Reminder: objects and fields are word-aligned so will have the lowest two
      bits unset) *)
+  (* Reordered with combined modes of classical and enhanced orthogonal persistence,
+     see `types.rs` *)
   let int_of_tag = function
     | Object -> 1l
     | Array I -> 3l
@@ -2016,63 +2073,80 @@ module Tagged = struct
     | Blob P -> 29l
     | Blob A -> 31l
     | Indirection -> 33l
-    | Bits32 U -> 35l
-    | Bits32 S -> 37l
-    | Bits32 F -> 39l
-    | BigInt -> 41l
-    | Concat -> 43l
-    | Region -> 45l
-    | OneWordFiller -> 47l
-    | FreeSpace -> 49l
-    | ArraySliceMinimum -> 50l
+    | BigInt -> 35l
+    | Concat -> 37l
+    | Region -> 39l
+    (* Only used during 32-bit classical persistence mode. *)
+    | Bits32 U -> 41l
+    | Bits32 S -> 43l
+    | Bits32 F -> 45l
+    | Null -> 47l
+    (* RTS-internal *)
+    | OneWordFiller -> 49l
+    | FreeSpace -> 51l
+    | ArraySliceMinimum -> 52l
     (* Next two tags won't be seen by the GC, so no need to set the lowest bit
        for `CoercionFailure` and `StableSeen` *)
     | CoercionFailure -> 0xfffffffel
     | StableSeen -> 0xffffffffl
 
-  (*
-     The null pointer is the sentinel `0xffff_fffbl` (skewed representation).
-    
-     This serves for efficient null tests by using direct pointer comparison.
-     The null pointer must not be dereferenced.
-     Null tests are possible without resolving the forwarding pointer of a non-null comparand.
-  *)
-
-  let null_vanilla_pointer = 0xffff_fffbl (* skewed, pointing to last unallocated Wasm page *)
-  let null_pointer = compile_unboxed_const null_vanilla_pointer
-
-  let is_null env = compile_eq_const null_vanilla_pointer
+  (* Declare `env` for lazy computation of the header size when the compile environment with compile flags are defined *)
+  let header_size env =
+    if !Flags.gc_strategy = Flags.Incremental then 2l else 1l
 
-  let not_null env =
-    (* null test works without forwarding pointer resolution of a non-null comparand *)
-    null_pointer ^^ 
-    G.i (Compare (Wasm.Values.I32 I32Op.Ne))  
-
-  let header_size = 2l
-  
   (* The tag *)
   let tag_field = 0l
-  let forwarding_pointer_field = 1l
+  let forwarding_pointer_field env =
+    assert (!Flags.gc_strategy = Flags.Incremental);
+    1l
 
   (* Note: post-allocation barrier must be applied after initialization *)
   let alloc env size tag =
     assert (size > 1l);
     let name = Printf.sprintf "alloc_size<%d>_tag<%d>" (Int32.to_int size) (Int32.to_int (int_of_tag tag)) in
-
+    (* Computes a (conservative) mask for the bumped HP, so that the existence of non-zero bits under it
+       guarantees that a page boundary crossing didn't happen (i.e. no ripple-carry). *)
+    let overflow_mask increment =
+      let n = Int32.to_int increment in
+      assert (n > 0 && n < 0x8000);
+      let page_mask = Int32.sub page_size 1l in
+      (* We can extend the mask to the right if the bump increment is a power of two. *)
+      let ext = if Numerics.Nat16.(to_int (popcnt (of_int n))) = 1 then increment else 0l in
+      Int32.(logor ext (logand page_mask (shift_left minus_one (16 - Numerics.Nat16.(to_int (clz (of_int n))))))) in
+    (* always inline *)
     Func.share_code0 Func.Never env name [I32Type] (fun env ->
       let set_object, get_object = new_local env "new_object" in
-      Heap.alloc env size ^^
+      let size_in_bytes = Int32.(mul size Heap.word_size) in
+      let half_page_size = Int32.div page_size 2l in
+      (if !Flags.gc_strategy <> Flags.Incremental && size_in_bytes < half_page_size then
+         GC.get_heap_pointer env ^^
+         GC.get_heap_pointer env ^^
+         compile_add_const size_in_bytes ^^
+         GC.set_heap_pointer env ^^
+         GC.get_heap_pointer env ^^
+         compile_bitand_const (overflow_mask size_in_bytes) ^^
+         G.if0
+           G.nop (* no page crossing *)
+           (Heap.ensure_allocated env) (* ensure that HP's page is allocated *)
+       else
+         Heap.alloc env size) ^^
       set_object ^^ get_object ^^
       compile_unboxed_const (int_of_tag tag) ^^
       Heap.store_field tag_field ^^
-      get_object ^^ (* object pointer *)
-      get_object ^^ (* forwarding pointer *)
-      Heap.store_field forwarding_pointer_field ^^
+      (if !Flags.gc_strategy = Flags.Incremental then
+        get_object ^^ (* object pointer *)
+        get_object ^^ (* forwarding pointer *)
+        Heap.store_field (forwarding_pointer_field env)
+      else
+        G.nop) ^^
       get_object
     )
 
   let load_forwarding_pointer env =
-    Heap.load_field forwarding_pointer_field
+    (if !Flags.gc_strategy = Flags.Incremental then
+      Heap.load_field (forwarding_pointer_field env)
+    else
+      G.nop)
 
   let store_tag env tag =
     load_forwarding_pointer env ^^
@@ -2101,29 +2175,33 @@ module Tagged = struct
       G.nop
 
   let check_forwarding env unskewed =
-    let name = "check_forwarding_" ^ if unskewed then "unskewed" else "skewed" in
-    Func.share_code1 Func.Always env name ("object", I32Type) [I32Type] (fun env get_object ->
-      let set_object = G.setter_for get_object in
-      (if unskewed then
+    (if !Flags.gc_strategy = Flags.Incremental then
+      let name = "check_forwarding_" ^ if unskewed then "unskewed" else "skewed" in
+      Func.share_code1 Func.Always env name ("object", I32Type) [I32Type] (fun env get_object ->
+        let set_object = G.setter_for get_object in
+        (if unskewed then
+          get_object ^^
+          compile_unboxed_const ptr_skew ^^
+          G.i (Binary (Wasm.Values.I32 I32Op.Add)) ^^
+          set_object
+        else G.nop) ^^
         get_object ^^
-        compile_unboxed_const ptr_skew ^^
-        G.i (Binary (Wasm.Values.I32 I32Op.Add)) ^^
-        set_object
-      else G.nop) ^^
-      get_object ^^
-      load_forwarding_pointer env ^^
-      get_object ^^
-      G.i (Compare (Wasm.Values.I32 I32Op.Eq)) ^^
-      E.else_trap_with env "missing object forwarding" ^^
-      get_object ^^
-      (if unskewed then
-        compile_unboxed_const ptr_unskew ^^
-        G.i (Binary (Wasm.Values.I32 I32Op.Add))
-      else G.nop))
-  
+        load_forwarding_pointer env ^^
+        get_object ^^
+        G.i (Compare (Wasm.Values.I32 I32Op.Eq)) ^^
+        E.else_trap_with env "missing object forwarding" ^^
+        get_object ^^
+        (if unskewed then
+          compile_unboxed_const ptr_unskew ^^
+          G.i (Binary (Wasm.Values.I32 I32Op.Add))
+        else G.nop))
+    else G.nop)
+
   let check_forwarding_for_store env typ =
-    let (set_value, get_value, _) = new_local_ env typ "value" in
-    set_value ^^ check_forwarding env false ^^ get_value
+    (if !Flags.gc_strategy = Flags.Incremental then
+      let (set_value, get_value, _) = new_local_ env typ "value" in
+      set_value ^^ check_forwarding env false ^^ get_value
+    else G.nop)
 
   let load_field env index =
     (if !Flags.sanity then check_forwarding env false else G.nop) ^^
@@ -2133,6 +2211,14 @@ module Tagged = struct
     (if !Flags.sanity then check_forwarding_for_store env I32Type else G.nop) ^^
     Heap.store_field index
 
+  let load_field_unskewed env index =
+    (if !Flags.sanity then check_forwarding env true else G.nop) ^^
+    Heap.load_field_unskewed index
+
+  let load_field64_unskewed env index =
+    (if !Flags.sanity then check_forwarding env true else G.nop) ^^
+    Heap.load_field64_unskewed index
+
   let load_field64 env index =
     (if !Flags.sanity then check_forwarding env false else G.nop) ^^
     Heap.load_field64 index
@@ -2166,7 +2252,10 @@ module Tagged = struct
     go cases
 
   let allocation_barrier env =
-    E.call_import env "rts" "allocation_barrier"
+    (if !Flags.gc_strategy = Flags.Incremental then
+      E.call_import env "rts" "allocation_barrier"
+    else
+      G.nop)
 
   let write_with_barrier env =
     let (set_value, get_value) = new_local env "written_value" in
@@ -2184,29 +2273,46 @@ module Tagged = struct
 
   let obj env tag element_instructions : G.t =
     let n = List.length element_instructions in
-    let size = (Int32.add (Wasm.I32.of_int_u n) header_size) in
+    let size = (Int32.add (Wasm.I32.of_int_u n) (header_size env)) in
     let (set_object, get_object) = new_local env "new_object" in
     alloc env size tag ^^
     set_object ^^
     let init_elem idx instrs : G.t =
       get_object ^^
       instrs ^^
-      Heap.store_field (Int32.add (Wasm.I32.of_int_u idx) header_size)
+      Heap.store_field (Int32.add (Wasm.I32.of_int_u idx) (header_size env))
     in
     G.concat_mapi init_elem element_instructions ^^
     get_object ^^
     allocation_barrier env
 
-  let shared_object env allocation =
-    let index = E.object_pool_add env allocation in
-    E.SharedObject index
-
-  let materialize_shared_value env = function
-  | E.Vanilla vanilla -> compile_unboxed_const vanilla
-  | E.SharedObject index -> Heap.get_static_variable env index
-
-  let share env allocation =
-    materialize_shared_value env (shared_object env allocation)
+  let new_static_obj env tag payload =
+    let payload = StaticBytes.as_bytes payload in
+    let header_size = Int32.(mul Heap.word_size (header_size env)) in
+    let size = Int32.(add header_size (Int32.of_int (String.length payload))) in
+    let unskewed_ptr = E.reserve_static_memory env size in
+    let skewed_ptr = Int32.(add unskewed_ptr ptr_skew) in
+    let tag = bytes_of_int32 (int_of_tag tag) in
+    let forward = bytes_of_int32 skewed_ptr in (* forwarding pointer *)
+    (if !Flags.gc_strategy = Flags.Incremental then
+      let incremental_gc_data = tag ^ forward ^ payload in
+      E.write_static_memory env unskewed_ptr incremental_gc_data
+    else
+      let non_incremental_gc_data = tag ^ payload in
+      E.write_static_memory env unskewed_ptr non_incremental_gc_data
+    );
+    skewed_ptr
+
+  let shared_static_obj env tag payload =
+    let tag_word = bytes_of_int32 (int_of_tag tag) in
+    let payload_bytes = StaticBytes.as_bytes payload in
+    let key = tag_word ^ payload_bytes in
+    match E.object_pool_find env key with
+    | Some ptr -> ptr (* no forwarding pointer dereferencing needed as static objects do not move *)
+    | None ->
+      let ptr = new_static_obj env tag payload in
+      E.object_pool_add env key ptr;
+      ptr
 
 end (* Tagged *)
 
@@ -2219,6 +2325,7 @@ module MutBox = struct
        └──────┴─────┴─────────┘
 
      The object header includes the obj tag (MutBox) and the forwarding pointer.
+     The forwarding pointer is only reserved if compiled for the incremental GC.
   *)
 
   let field = Tagged.header_size
@@ -2226,27 +2333,38 @@ module MutBox = struct
   let alloc env =
     Tagged.obj env Tagged.MutBox [ compile_unboxed_zero ]
 
+  let static env =
+    let ptr = Tagged.new_static_obj env Tagged.MutBox StaticBytes.[
+      I32 0l; (* zero *)
+    ] in
+    E.add_static_root env ptr;
+    ptr
+
   let load_field env =
     Tagged.load_forwarding_pointer env ^^
-    Tagged.load_field env field
+    Tagged.load_field env (field env)
 
   let store_field env =
     let (set_mutbox_value, get_mutbox_value) = new_local env "mutbox_value" in
     set_mutbox_value ^^
     Tagged.load_forwarding_pointer env ^^
     get_mutbox_value ^^
-    Tagged.store_field env field
-  
-  let add_global_mutbox env =
-    E.object_pool_add env alloc
+    Tagged.store_field env (field env)
 end
 
 
 module Opt = struct
   (* The Option type. Optional values are represented as
 
-    1. The null literal being the sentinel null pointer value, see above.
-       
+    1. ┌──────┐
+       │ null │
+       └──────┘
+
+       A special null value. It is fully static, and because it is unique, can
+       be recognized by pointer comparison (only the GC will care about the heap
+       tag).
+
+
     2. ┌──────┬─────────┐
        │ some │ payload │
        └──────┴─────────┘
@@ -2268,14 +2386,21 @@ module Opt = struct
 
   let some_payload_field = Tagged.header_size
 
-  let null_vanilla_lit = Tagged.null_vanilla_pointer
-  let null_lit env = Tagged.null_pointer
+  (* This relies on the fact that Tagged.shared_static_obj deduplicates via the object pool *)
+  let null_vanilla_lit env : int32 =
+    Tagged.shared_static_obj env Tagged.Null []
+
+  let null_lit env =
+    compile_unboxed_const (null_vanilla_lit env)
 
-  let is_null = Tagged.is_null
-  let is_some = Tagged.not_null
+  let vanilla_lit env ptr : int32 =
+    Tagged.shared_static_obj env Tagged.Some StaticBytes.[
+      I32 ptr
+    ]
 
-  let alloc_some env get_payload =
-    Tagged.obj env Tagged.Some [ get_payload ]
+ let is_some env =
+    null_lit env ^^
+    G.i (Compare (Wasm.Values.I32 I32Op.Ne))
 
   let inject env e =
     e ^^
@@ -2285,26 +2410,19 @@ module Opt = struct
         ( get_x ^^ BitTagged.is_true_literal env ^^ (* exclude true literal since `branch_default` follows the forwarding pointer *)
           E.if_ env [I32Type]
             ( get_x ) (* true literal, no wrapping *)
-            ( get_x ^^ is_some env ^^
-              E.if_ env [I32Type]
-                ( get_x ^^ Tagged.branch_default env [I32Type]
-                  ( get_x ) (* default tag, no wrapping *)
-                  [ Tagged.Some, alloc_some env get_x ]
-                )
-                ( alloc_some env get_x ) (* ?ⁿnull for n > 0 *)
+            ( get_x ^^ Tagged.branch_default env [I32Type]
+              ( get_x ) (* default tag, no wrapping *)
+              [ Tagged.Null,
+                (* NB: even ?null does not require allocation: We use a static
+                  singleton for that: *)
+                compile_unboxed_const (vanilla_lit env (null_vanilla_lit env))
+              ; Tagged.Some,
+                Tagged.obj env Tagged.Some [get_x]
+              ]
             )
         )
     )
 
-  let constant env = function
-  | E.Vanilla value when value = null_vanilla_lit -> Tagged.shared_object env (fun env -> alloc_some env (null_lit env)) (* ?ⁿnull for n > 0 *)
-  | E.Vanilla value -> E.Vanilla value (* not null and no `Opt` object *)
-  | shared_value ->
-    Tagged.shared_object env (fun env -> 
-      let materialized_value = Tagged.materialize_shared_value env shared_value in  
-      inject env materialized_value (* potentially wrap in new `Opt` *)
-    )
-
   (* This function is used where conceptually, Opt.inject should be used, but
   we know for sure that it wouldn’t do anything anyways, except dereferencing the forwarding pointer *)
   let inject_simple env e =
@@ -2312,7 +2430,7 @@ module Opt = struct
 
   let load_some_payload_field env =
     Tagged.load_forwarding_pointer env ^^
-    Tagged.load_field env some_payload_field
+    Tagged.load_field env (some_payload_field env)
 
   let project env =
     Func.share_code1 Func.Never env "opt_project" ("x", I32Type) [I32Type] (fun env get_x ->
@@ -2323,7 +2441,11 @@ module Opt = struct
             ( get_x ) (* true literal, no wrapping *)
             ( get_x ^^ Tagged.branch_default env [I32Type]
               ( get_x ) (* default tag, no wrapping *)
-              [ Tagged.Some, get_x ^^ load_some_payload_field env ]
+              [ Tagged.Some,
+                get_x ^^ load_some_payload_field env
+              ; Tagged.Null,
+                E.trap_with env "Internal error: opt_project: null!"
+              ]
             )
         )
     )
@@ -2340,10 +2462,11 @@ module Variant = struct
        └──────┴─────┴────────────┴─────────┘
 
      The object header includes the obj tag (TAG_VARIANT) and the forwarding pointer.
+     The forwarding pointer is only reserved if compiled for the incremental GC.
   *)
 
   let variant_tag_field = Tagged.header_size
-  let payload_field = Int32.add variant_tag_field 1l
+  let payload_field env = Int32.add (variant_tag_field env) 1l
 
   let hash_variant_label env : Mo_types.Type.lab -> int32 =
     E.hash env
@@ -2353,17 +2476,23 @@ module Variant = struct
 
   let get_variant_tag env =
     Tagged.load_forwarding_pointer env ^^
-    Tagged.load_field env variant_tag_field
+    Tagged.load_field env (variant_tag_field env)
 
   let project env =
     Tagged.load_forwarding_pointer env ^^
-    Tagged.load_field env payload_field
+    Tagged.load_field env (payload_field env)
 
   (* Test if the top of the stack points to a variant with this label *)
   let test_is env l =
     get_variant_tag env ^^
     compile_eq_const (hash_variant_label env l)
 
+  let vanilla_lit env i ptr =
+    Tagged.shared_static_obj env Tagged.Variant StaticBytes.[
+      I32 (hash_variant_label env i);
+      I32 ptr
+    ]
+
 end (* Variant *)
 
 
@@ -2378,22 +2507,24 @@ module Closure = struct
        └──────┴─────┴───────┴──────┴──────────────┘
 
      The object header includes the object tag (TAG_CLOSURE) and the forwarding pointer.
+     The forwarding pointer is only reserved if compiled for the incremental GC.
+
   *)
-  let header_size = Int32.add Tagged.header_size 2l
+  let header_size env = Int32.add (Tagged.header_size env) 2l
 
   let funptr_field = Tagged.header_size
-  let len_field = Int32.add 1l Tagged.header_size
+  let len_field env = Int32.add 1l (Tagged.header_size env)
 
   let load_data env i =
     Tagged.load_forwarding_pointer env ^^
-    Tagged.load_field env (Int32.add header_size i)
+    Tagged.load_field env (Int32.add (header_size env) i)
 
   let store_data env i =
     let (set_closure_data, get_closure_data) = new_local env "closure_data" in
     set_closure_data ^^
     Tagged.load_forwarding_pointer env ^^
     get_closure_data ^^
-    Tagged.store_field env (Int32.add header_size i)
+    Tagged.store_field env (Int32.add (header_size env) i)
 
   let prepare_closure_call env =
     Tagged.load_forwarding_pointer env
@@ -2411,17 +2542,16 @@ module Closure = struct
       FakeMultiVal.ty (Lib.List.make n_res I32Type))) in
     (* get the table index *)
     Tagged.load_forwarding_pointer env ^^
-    Tagged.load_field env funptr_field ^^
+    Tagged.load_field env (funptr_field env) ^^
     (* All done: Call! *)
     G.i (CallIndirect (nr ty)) ^^
     FakeMultiVal.load env (Lib.List.make n_res I32Type)
 
-  let constant env get_fi =
-    let fi = E.add_fun_ptr env (get_fi ()) in
-    Tagged.shared_object env (fun env -> Tagged.obj env Tagged.Closure [
-      compile_unboxed_const fi;
-      compile_unboxed_const 0l
-    ])
+  let static_closure env fi : int32 =
+    Tagged.shared_static_obj env Tagged.Closure StaticBytes.[
+      I32 (E.add_fun_ptr env fi);
+      I32 0l
+    ]
 
 end (* Closure *)
 
@@ -2439,6 +2569,8 @@ module BoxedWord64 = struct
        └──────┴─────┴─────┴─────┘
 
      The object header includes the object tag (Bits64) and the forwarding pointer.
+     The forwarding pointer is only reserved if compiled for the incremental GC.
+
   *)
 
   let payload_field = Tagged.header_size
@@ -2449,24 +2581,25 @@ module BoxedWord64 = struct
     | Type.Int64 -> Tagged.(Bits64 S)
     | _ -> assert false
 
+  let vanilla_lit env pty i =
+    if BitTagged.can_tag_const pty i
+    then BitTagged.tag_const pty i
+    else
+      Tagged.shared_static_obj env (heap_tag env pty) StaticBytes.[
+        I64 i
+      ]
+
   let compile_box env pty compile_elem : G.t =
     let (set_i, get_i) = new_local env "boxed_i64" in
-    let size = 4l in
+    let size = if !Flags.gc_strategy = Flags.Incremental then 4l else 3l in
     Tagged.alloc env size (heap_tag env pty) ^^
     set_i ^^
-    get_i ^^ compile_elem ^^ Tagged.store_field64 env payload_field ^^
+    get_i ^^ compile_elem ^^ Tagged.store_field64 env (payload_field env) ^^
     get_i ^^
     Tagged.allocation_barrier env
 
-  let constant env pty i =
-    if BitTagged.can_tag_const pty i
-    then 
-      E.Vanilla (BitTagged.tag_const pty i)
-    else
-      Tagged.shared_object env (fun env -> compile_box env pty (compile_const_64 i))
-
-  let box env pty = 
-    Func.share_code1 Func.Never env 
+  let box env pty =
+    Func.share_code1 Func.Never env
       (prim_fun_name pty "box64") ("n", I64Type) [I32Type] (fun env get_n ->
       get_n ^^ BitTagged.if_can_tag_i64 env pty [I32Type]
         (get_n ^^ BitTagged.tag env pty)
@@ -2482,7 +2615,7 @@ module BoxedWord64 = struct
         (get_n ^^
          Tagged.load_forwarding_pointer env ^^
          Tagged.(sanity_check_tag __LINE__ env (heap_tag env pty)) ^^
-         Tagged.load_field64 env payload_field)
+         Tagged.load_field64 env (payload_field env))
     )
 end (* BoxedWord64 *)
 
@@ -2585,23 +2718,22 @@ module BoxedSmallWord = struct
     | Type.Int32 -> Tagged.(Bits32 S)
     | _ -> assert false
 
-  let payload_field = Tagged.header_size
+  let payload_field env = Tagged.header_size env
 
-  let constant env pty i =
+  let vanilla_lit env pty i =
     if BitTagged.can_tag_const pty (Int64.of_int (Int32.to_int i))
-    then
-      E.Vanilla (BitTagged.tag_const pty (Int64.of_int (Int32.to_int i)))
+    then BitTagged.tag_const pty (Int64.of_int (Int32.to_int i))
     else
-      Tagged.shared_object env (fun env -> (Tagged.obj env (heap_tag env pty) [
-        compile_unboxed_const i
-      ]))
+      Tagged.shared_static_obj env (heap_tag env pty) StaticBytes.[
+        I32 i
+      ]
 
   let compile_box env pty compile_elem : G.t =
     let (set_i, get_i) = new_local env "boxed_i32" in
-    let size = 3l in
+    let size = if !Flags.gc_strategy = Flags.Incremental then 3l else 2l in
     Tagged.alloc env size (heap_tag env pty) ^^
     set_i ^^
-    get_i ^^ compile_elem ^^ Tagged.store_field env payload_field ^^
+    get_i ^^ compile_elem ^^ Tagged.store_field env (payload_field env) ^^
     get_i ^^
     Tagged.allocation_barrier env
 
@@ -2622,7 +2754,7 @@ module BoxedSmallWord = struct
         (get_n ^^
          Tagged.load_forwarding_pointer env ^^
          Tagged.(sanity_check_tag __LINE__ env (heap_tag env pty)) ^^
-         Tagged.load_field env payload_field)
+         Tagged.load_field env (payload_field env))
     )
 
   let _lit env pty n = compile_unboxed_const n ^^ box env pty
@@ -2865,30 +2997,32 @@ module Float = struct
      debug inspection (or GC representation change) arises.
 
      The object header includes the object tag (Bits64) and the forwarding pointer.
+     The forwarding pointer is only reserved if compiled for the incremental GC.
   *)
 
-  let payload_field = Tagged.header_size
+  let payload_field env = Tagged.header_size env
 
   let compile_unboxed_const f = G.i (Const (nr (Wasm.Values.F64 f)))
 
+  let vanilla_lit env f =
+    Tagged.shared_static_obj env Tagged.(Bits64 F) StaticBytes.[
+      I64 (Wasm.F64.to_bits f)
+    ]
+
   let box env = Func.share_code1 Func.Never env "box_f64" ("f", F64Type) [I32Type] (fun env get_f ->
     let (set_i, get_i) = new_local env "boxed_f64" in
-    let size = Int32.add Tagged.header_size  2l in
+    let size = Int32.add (Tagged.header_size env)  2l in
     Tagged.alloc env size Tagged.(Bits64 F) ^^
     set_i ^^
-    get_i ^^ get_f ^^ Tagged.store_field_float64 env payload_field ^^
+    get_i ^^ get_f ^^ Tagged.store_field_float64 env (payload_field env) ^^
     get_i ^^
     Tagged.allocation_barrier env
   )
 
-  let unbox env = 
-    Tagged.load_forwarding_pointer env ^^ 
+  let unbox env =
+    Tagged.load_forwarding_pointer env ^^
     Tagged.(sanity_check_tag __LINE__ env (Bits64 F)) ^^
-    Tagged.load_field_float64 env payload_field
-
-  let constant env f = Tagged.shared_object env (fun env -> 
-    compile_unboxed_const f ^^ 
-    box env)
+    Tagged.load_field_float64 env (payload_field env)
 
 end (* Float *)
 
@@ -2914,13 +3048,13 @@ module ReadBuf = struct
   *)
 
   let get_ptr get_buf =
-    get_buf ^^ G.i (Load {ty = I32Type; align = 2; offset = 0l; sz = None})
+    get_buf ^^ G.i (Load {ty = I32Type; align = 2; offset = 0L; sz = None})
   let get_end get_buf =
-    get_buf ^^ G.i (Load {ty = I32Type; align = 2; offset = Heap.word_size; sz = None})
+    get_buf ^^ G.i (Load {ty = I32Type; align = 2; offset = Int64.of_int32 Heap.word_size; sz = None})
   let set_ptr get_buf new_val =
-    get_buf ^^ new_val ^^ G.i (Store {ty = I32Type; align = 2; offset = 0l; sz = None})
+    get_buf ^^ new_val ^^ G.i (Store {ty = I32Type; align = 2; offset = 0L; sz = None})
   let set_end get_buf new_val =
-    get_buf ^^ new_val ^^ G.i (Store {ty = I32Type; align = 2; offset = Heap.word_size; sz = None})
+    get_buf ^^ new_val ^^ G.i (Store {ty = I32Type; align = 2; offset = Int64.of_int32 Heap.word_size; sz = None})
   let set_size get_buf get_size =
     set_end get_buf
       (get_ptr get_buf ^^ get_size ^^ G.i (Binary (Wasm.Values.I32 I32Op.Add)))
@@ -2954,19 +3088,19 @@ module ReadBuf = struct
   let read_byte env get_buf =
     check_space env get_buf (compile_unboxed_const 1l) ^^
     get_ptr get_buf ^^
-    G.i (Load {ty = I32Type; align = 0; offset = 0l; sz = Some Wasm.Types.(Pack8, ZX)}) ^^
+    G.i (Load {ty = I32Type; align = 0; offset = 0L; sz = Some Wasm.Types.(Pack8, ZX)}) ^^
     advance get_buf (compile_unboxed_const 1l)
 
   let read_word16 env get_buf =
     check_space env get_buf (compile_unboxed_const 2l) ^^
     get_ptr get_buf ^^
-    G.i (Load {ty = I32Type; align = 0; offset = 0l; sz = Some Wasm.Types.(Pack16, ZX)}) ^^
+    G.i (Load {ty = I32Type; align = 0; offset = 0L; sz = Some Wasm.Types.(Pack16, ZX)}) ^^
     advance get_buf (compile_unboxed_const 2l)
 
   let read_word32 env get_buf =
     check_space env get_buf (compile_unboxed_const 4l) ^^
     get_ptr get_buf ^^
-    G.i (Load {ty = I32Type; align = 0; offset = 0l; sz = None}) ^^
+    G.i (Load {ty = I32Type; align = 0; offset = 0L; sz = None}) ^^
     advance get_buf (compile_unboxed_const 4l)
 
   let speculative_read_word64 env get_buf =
@@ -2975,19 +3109,19 @@ module ReadBuf = struct
       (compile_const_64 (-1L))
       begin
         get_ptr get_buf ^^
-        G.i (Load {ty = I64Type; align = 0; offset = 0l; sz = None})
+        G.i (Load {ty = I64Type; align = 0; offset = 0L; sz = None})
       end
 
   let read_word64 env get_buf =
     check_space env get_buf (compile_unboxed_const 8l) ^^
     get_ptr get_buf ^^
-    G.i (Load {ty = I64Type; align = 0; offset = 0l; sz = None}) ^^
+    G.i (Load {ty = I64Type; align = 0; offset = 0L; sz = None}) ^^
     advance get_buf (compile_unboxed_const 8l)
 
   let read_float64 env get_buf =
     check_space env get_buf (compile_unboxed_const 8l) ^^
     get_ptr get_buf ^^
-    G.i (Load {ty = F64Type; align = 0; offset = 0l; sz = None}) ^^
+    G.i (Load {ty = F64Type; align = 0; offset = 0L; sz = None}) ^^
     advance get_buf (compile_unboxed_const 8l)
 
   let read_blob env get_buf get_len =
@@ -3008,7 +3142,7 @@ sig
   (* word from SR.Vanilla, trapping, unsigned semantics *)
   val to_word32 : E.t -> G.t
   val to_word64 : E.t -> G.t
-  val to_word32_with : E.t -> G.t -> G.t (* with error message on stack (ptr/len) *)
+  val to_word32_with : E.t -> G.t (* with error message on stack (ptr/len) *)
 
   (* word from SR.Vanilla, lossy, raw bits *)
   val truncate_to_word32 : E.t -> G.t
@@ -3037,6 +3171,13 @@ sig
    *)
   val compile_store_to_data_buf_signed : E.t -> G.t
   val compile_store_to_data_buf_unsigned : E.t -> G.t
+  (* given on stack
+     - numeric object (vanilla, TOS)
+     - (unskewed) stream
+    store the binary representation of the numeric object into the stream
+   *)
+  val compile_store_to_stream_signed : E.t -> G.t
+  val compile_store_to_stream_unsigned : E.t -> G.t
   (* given a ReadBuf on stack, consume bytes from it,
      deserializing to a numeric object
      and leave it on the stack (vanilla).
@@ -3044,8 +3185,8 @@ sig
    *)
   val compile_load_from_data_buf : E.t -> G.t -> bool -> G.t
 
-  (* constant *)
-  val constant : E.t -> Big_int.big_int -> E.shared_value
+  (* literals *)
+  val vanilla_lit : E.t -> Big_int.big_int -> int32
 
   (* arithmetic *)
   val compile_abs : E.t -> G.t
@@ -3375,10 +3516,10 @@ module MakeCompact (Num : BigNumType) : BigNumType = struct
       (get_n ^^ clear_tag env ^^ compile_unboxed_const 0l ^^ G.i (Compare (Wasm.Values.I32 I32Op.LtS)))
       (get_n ^^ Num.compile_is_negative env)
 
-  let constant env = function
+  let vanilla_lit env = function
     | n when Big_int.is_int_big_int n && BitTagged.can_tag_const Type.Int (Big_int.int64_of_big_int n) ->
-      E.Vanilla (BitTagged.tag_const Type.Int (Big_int.int64_of_big_int n))
-    | n -> Num.constant env n
+      BitTagged.tag_const Type.Int (Big_int.int64_of_big_int n)
+    | n -> Num.vanilla_lit env n
 
   let compile_neg env =
     let sminl = Int32.shift_left 1l (BitTagged.sbits_of Type.Int) in
@@ -3595,6 +3736,48 @@ module MakeCompact (Num : BigNumType) : BigNumType = struct
         get_buf ^^ get_x ^^ Num.compile_store_to_data_buf_signed env)
       env
 
+  let compile_store_to_stream_unsigned env =
+    let set_x, get_x = new_local env "x" in
+    let set_stream, get_stream = new_local env "stream" in
+    set_x ^^ set_stream ^^
+    get_x ^^
+    try_unbox I32Type
+      (fun env ->
+        BitTagged.untag_i32 __LINE__ env Type.Int ^^ set_x ^^
+        (* get size & reserve & encode *)
+        let dest =
+          get_stream ^^
+          I32Leb.compile_leb128_size get_x ^^
+          E.call_import env "rts" "stream_reserve" in
+        I32Leb.compile_store_to_data_buf_unsigned env get_x dest)
+      (fun env ->
+        G.i Drop ^^
+        get_stream ^^ get_x ^^ Num.compile_store_to_stream_unsigned env ^^
+        compile_unboxed_zero)
+      env ^^
+      G.i Drop
+
+  let compile_store_to_stream_signed env =
+    let set_x, get_x = new_local env "x" in
+    let set_stream, get_stream = new_local env "stream" in
+    set_x ^^ set_stream ^^
+    get_x ^^
+    try_unbox I32Type
+      (fun env ->
+        BitTagged.untag_i32 __LINE__ env Type.Int ^^ set_x ^^
+        (* get size & reserve & encode *)
+        let dest =
+          get_stream ^^
+          I32Leb.compile_sleb128_size get_x ^^
+          E.call_import env "rts" "stream_reserve" in
+        I32Leb.compile_store_to_data_buf_signed env get_x dest)
+      (fun env ->
+        G.i Drop ^^
+        get_stream ^^ get_x ^^ Num.compile_store_to_stream_signed env ^^
+        compile_unboxed_zero)
+      env ^^
+      G.i Drop
+
   let compile_data_size_unsigned env =
     try_unbox I32Type
       (fun _ ->
@@ -3684,19 +3867,21 @@ module MakeCompact (Num : BigNumType) : BigNumType = struct
       (get_a ^^ BitTagged.untag_i32 __LINE__ env Type.Int) (*TBR*)
       (get_a ^^ Num.to_word32 env)
 
-  let to_word32_with env get_err_msg =
+  let to_word32_with env =
     let set_a, get_a = new_local env "a" in
-    set_a ^^ get_a ^^
+    let set_err_msg, get_err_msg = new_local env "err_msg" in
+    set_err_msg ^^ set_a ^^
+    get_a ^^
     BitTagged.if_tagged_scalar env [I32Type]
       (get_a ^^ BitTagged.untag_i32 __LINE__ env Type.Int) (* TBR *)
-      (get_a ^^ Num.to_word32_with env get_err_msg)
+      (get_a ^^ get_err_msg ^^ Num.to_word32_with env)
 end
 
 module BigNumLibtommath : BigNumType = struct
 
   let to_word32 env = E.call_import env "rts" "bigint_to_word32_trap"
   let to_word64 env = E.call_import env "rts" "bigint_to_word64_trap"
-  let to_word32_with env get_err_msg = get_err_msg ^^ E.call_import env "rts" "bigint_to_word32_trap_with"
+  let to_word32_with env = E.call_import env "rts" "bigint_to_word32_trap_with"
 
   let truncate_to_word32 env = E.call_import env "rts" "bigint_to_word32_wrap"
   let truncate_to_word64 env = E.call_import env "rts" "bigint_to_word64_wrap"
@@ -3717,6 +3902,9 @@ module BigNumLibtommath : BigNumType = struct
     get_n ^^ get_buf ^^ E.call_import env "rts" "bigint_leb128_encode" ^^
     get_n ^^ E.call_import env "rts" "bigint_leb128_size"
 
+  let compile_store_to_stream_unsigned env =
+    E.call_import env "rts" "bigint_leb128_stream_encode"
+
   let compile_store_to_data_buf_signed env =
     let (set_buf, get_buf) = new_local env "buf" in
     let (set_n, get_n) = new_local env "n" in
@@ -3724,11 +3912,14 @@ module BigNumLibtommath : BigNumType = struct
     get_n ^^ get_buf ^^ E.call_import env "rts" "bigint_sleb128_encode" ^^
     get_n ^^ E.call_import env "rts" "bigint_sleb128_size"
 
+  let compile_store_to_stream_signed env =
+    E.call_import env "rts" "bigint_sleb128_stream_encode"
+
   let compile_load_from_data_buf env get_data_buf = function
     | false -> get_data_buf ^^ E.call_import env "rts" "bigint_leb128_decode"
     | true -> get_data_buf ^^ E.call_import env "rts" "bigint_sleb128_decode"
 
-  let constant env n =
+  let vanilla_lit env n =
     (* See enum mp_sign *)
     let sign = if Big_int.sign_big_int n >= 0 then 0l else 1l in
 
@@ -3742,19 +3933,22 @@ module BigNumLibtommath : BigNumType = struct
         then []
         else
           let (a, b) = Big_int.quomod_big_int n twoto28 in
-          [ compile_unboxed_const (Big_int.int32_of_big_int b) ] @ go a
+          [ Big_int.int32_of_big_int b ] @ go a
       in go n
     in
     (* how many 32 bit digits *)
     let size = Int32.of_int (List.length limbs) in
 
     (* cf. mp_int in tommath.h *)
-    Tagged.shared_object env (fun env -> Tagged.obj env Tagged.BigInt ([
-      compile_unboxed_const size; (* used *)
-      compile_unboxed_const size; (* size; relying on Heap.word_size == size_of(mp_digit) *)
-      compile_unboxed_const sign;
-      compile_unboxed_const 0l; (* dp; this will be patched in BigInt::mp_int_ptr in the RTS when used *)
-    ] @ limbs))
+    let ptr = Tagged.shared_static_obj env Tagged.BigInt StaticBytes.[
+      I32 size; (* used *)
+      I32 size; (* size; relying on Heap.word_size == size_of(mp_digit) *)
+      I32 sign;
+      I32 0l; (* dp; this will be patched in BigInt::mp_int_ptr in the RTS when used *)
+      i32s limbs
+
+    ] in
+    ptr
 
   let assert_nonneg env =
     Func.share_code1 Func.Never env "assert_nonneg" ("n", I32Type) [I32Type] (fun env get_n ->
@@ -3825,6 +4019,210 @@ module Prim = struct
     TaggedSmallWord.shift_leftWordNtoI32 b
 end (* Prim *)
 
+module Object = struct
+ (* An object with a mutable field1 and immutable field2 has the following
+    heap layout:
+
+    ┌──────┬─────┬──────────┬──────────┬─────────┬─────────────┬───┐
+    │ obj header │ n_fields │ hash_ptr │ ind_ptr │ field2_data │ … │
+    └──────┴─────┴──────────┴┬─────────┴┬────────┴─────────────┴───┘
+         ┌───────────────────┘          │
+         │   ┌──────────────────────────┘
+         │   ↓
+         │  ╶─┬────────┬─────────────┐
+         │    │ MutBox │ field1_data │
+         ↓    └────────┴─────────────┘
+        ╶─┬─────────────┬─────────────┬───┐
+          │ field1_hash │ field2_hash │ … │
+          └─────────────┴─────────────┴───┘
+
+    The object header includes the object tag (Object) and the forwarding pointer.
+    The forwarding pointer is only reserved if compiled for the incremental GC.
+
+    The field hash array lives in static memory (so no size header needed).
+    The hash_ptr is skewed.
+
+    The field2_data for immutable fields is a vanilla word.
+
+    The field1_data for mutable fields are pointers to a MutBox. This indirection 
+    is a consequence of how we compile object literals with `await` instructions, 
+    as these mutable fields need to be able to alias local mutable variables, e.g.
+    `{ public let f = 1; await async (); public let var v = 2}`.
+    Other use cases are object constructors with public and private mutable fields, 
+    where the physical record only wraps the public fields.
+    Moreover, closures can selectively capture the individual fields instead of 
+    the containing object.
+    Finally, Candid stabilization/destabilization also relies on the indirection 
+    of mutable fields, to reserve and store alias information in those locations.
+
+    We could alternatively switch to an allocate-first approach in the
+    await-translation of objects, and get rid of this indirection -- if it were
+    not for the implementation of sharing of mutable stable values.
+  *)
+
+  let header_size env = Int32.add (Tagged.header_size env) 2l
+
+  (* Number of object fields *)
+  let size_field env = Int32.add (Tagged.header_size env) 0l
+  let hash_ptr_field env = Int32.add (Tagged.header_size env) 1l
+
+  module FieldEnv = Env.Make(String)
+
+  (* This is for static objects *)
+  let vanilla_lit env (fs : (string * int32) list) : int32 =
+    let (hashes, ptrs) = fs
+      |> List.map (fun (n, ptr) -> (Mo_types.Hash.hash n,ptr))
+      |> List.sort compare
+      |> List.split
+    in
+
+    let hash_ptr = E.add_static env StaticBytes.[ i32s hashes ] in
+
+    Tagged.shared_static_obj env Tagged.Object StaticBytes.[
+      I32 (Int32.of_int (List.length fs));
+      I32 hash_ptr;
+      i32s ptrs;
+    ]
+
+  (* This is for non-recursive objects, i.e. ObjNewE *)
+  (* The instructions in the field already create the indirection if needed *)
+  let lit_raw env (fs : (string * (unit -> G.t)) list ) =
+    let name_pos_map =
+      fs |>
+      (* We could store only public fields in the object, but
+         then we need to allocate separate boxes for the non-public ones:
+         List.filter (fun (_, vis, f) -> vis.it = Public) |>
+      *)
+      List.map (fun (n,_) -> (E.hash env n, n)) |>
+      List.sort compare |>
+      List.mapi (fun i (_h,n) -> (n,Int32.of_int i)) |>
+      List.fold_left (fun m (n,i) -> FieldEnv.add n i m) FieldEnv.empty in
+
+    let sz = Int32.of_int (FieldEnv.cardinal name_pos_map) in
+
+    (* Create hash array *)
+    let hashes = fs |>
+      List.map (fun (n,_) -> E.hash env n) |>
+      List.sort compare in
+    let hash_ptr = E.add_static env StaticBytes.[ i32s hashes ] in
+
+    (* Allocate memory *)
+    let (set_ri, get_ri, ri) = new_local_ env I32Type "obj" in
+    Tagged.alloc env (Int32.add (header_size env) sz) Tagged.Object ^^
+    set_ri ^^
+
+    (* Set size *)
+    get_ri ^^
+    compile_unboxed_const sz ^^
+    Tagged.store_field env (size_field env) ^^
+
+    (* Set hash_ptr *)
+    get_ri ^^
+    compile_unboxed_const hash_ptr ^^
+    Tagged.store_field env (hash_ptr_field env) ^^
+
+    (* Write all the fields *)
+    let init_field (name, mk_is) : G.t =
+      (* Write the pointer to the indirection *)
+      get_ri ^^
+      mk_is () ^^
+      let i = FieldEnv.find name name_pos_map in
+      let offset = Int32.add (header_size env) i in
+      Tagged.store_field env offset
+    in
+    G.concat_map init_field fs ^^
+
+    (* Return the pointer to the object *)
+    get_ri ^^
+    Tagged.allocation_barrier env
+
+  (* Returns a pointer to the object field (without following the field indirection) *)
+  let idx_hash_raw env low_bound =
+    let name = Printf.sprintf "obj_idx<%d>" low_bound  in
+    Func.share_code2 Func.Always env name (("x", I32Type), ("hash", I32Type)) [I32Type] (fun env get_x get_hash ->
+      let set_x = G.setter_for get_x in
+      let set_h_ptr, get_h_ptr = new_local env "h_ptr" in
+
+      get_x ^^ Tagged.load_forwarding_pointer env ^^ set_x ^^
+
+      get_x ^^ Tagged.load_field env (hash_ptr_field env) ^^
+
+      (* Linearly scan through the fields (binary search can come later) *)
+      (* unskew h_ptr and advance both to low bound *)
+      compile_add_const Int32.(add ptr_unskew (mul Heap.word_size (of_int low_bound))) ^^
+      set_h_ptr ^^
+      get_x ^^
+      compile_add_const Int32.(mul Heap.word_size (add (header_size env) (of_int low_bound))) ^^
+      set_x ^^
+      G.loop0 (
+          get_h_ptr ^^ load_unskewed_ptr ^^
+          get_hash ^^ G.i (Compare (Wasm.Values.I32 I32Op.Eq)) ^^
+          G.if0
+            (get_x ^^ G.i Return)
+            (get_h_ptr ^^ compile_add_const Heap.word_size ^^ set_h_ptr ^^
+             get_x ^^ compile_add_const Heap.word_size ^^ set_x ^^
+             G.i (Br (nr 1l)))
+        ) ^^
+      G.i Unreachable
+    )
+
+  (* Returns a pointer to the object field (possibly following the indirection) *)
+  let idx_hash env low_bound indirect =
+    if indirect
+    then
+      let name = Printf.sprintf "obj_idx_ind<%d>" low_bound in
+      Func.share_code2 Func.Never env name (("x", I32Type), ("hash", I32Type)) [I32Type] (fun env get_x get_hash ->
+      get_x ^^ get_hash ^^
+      idx_hash_raw env low_bound ^^
+      load_ptr ^^ Tagged.load_forwarding_pointer env ^^
+      compile_add_const (Int32.mul (MutBox.field env) Heap.word_size)
+    )
+    else idx_hash_raw env low_bound
+
+  let field_type env obj_type s =
+    let _, fields = Type.as_obj_sub [s] obj_type in
+    Type.lookup_val_field s fields
+
+  (* Determines whether the field is mutable (and thus needs an indirection) *)
+  let is_mut_field env obj_type s =
+    let _, fields = Type.as_obj_sub [s] obj_type in
+    Type.is_mut (Type.lookup_val_field s fields)
+
+  (* Computes a lower bound for the positional index of a field in an object *)
+  let field_lower_bound env obj_type s =
+    let open Type in
+    let _, fields = as_obj_sub [s] obj_type in
+    List.iter (function {typ = Typ _; _} -> assert false | _ -> ()) fields;
+    let sorted_by_hash =
+      List.sort
+        (fun (h1, _) (h2, _) -> Lib.Uint32.compare h1 h2)
+        (List.map (fun f -> Lib.Uint32.of_int32 (E.hash env f.lab), f) fields) in
+    match Lib.List.index_of s (List.map (fun (_, {lab; _}) -> lab) sorted_by_hash) with
+    | Some i -> i
+    | _ -> assert false
+
+  (* Returns a pointer to the object field (without following the indirection) *)
+  let idx_raw env f =
+    compile_unboxed_const (E.hash env f) ^^
+    idx_hash_raw env 0
+
+  (* Returns a pointer to the object field (possibly following the indirection) *)
+  let idx env obj_type f =
+    compile_unboxed_const (E.hash env f) ^^
+    idx_hash env (field_lower_bound env obj_type f) (is_mut_field env obj_type f)
+
+  (* load the value (or the mutbox) *)
+  let load_idx_raw env f =
+    idx_raw env f ^^
+    load_ptr
+
+  (* load the actual value (dereferencing the mutbox) *)
+  let load_idx env obj_type f =
+    idx env obj_type f ^^
+    load_ptr
+
+end (* Object *)
+
 module Blob = struct
   (* The layout of a blob object is
 
@@ -3833,6 +4231,7 @@ module Blob = struct
      └──────┴─────┴─────────┴──────────────────┘
 
     The object header includes the object tag (Blob) and the forwarding pointer.
+    The forwarding pointer is only reserved if compiled for the incremental GC.
 
     This heap object is used for various kinds of binary, non-pointer data.
 
@@ -3840,12 +4239,12 @@ module Blob = struct
     Unicode.
   *)
 
-  let header_size = Int32.add Tagged.header_size 1l
-  let len_field = Int32.add Tagged.header_size 0l
+  let header_size env = Int32.add (Tagged.header_size env) 1l
+  let len_field env = Int32.add (Tagged.header_size env) 0l
 
   let len env =
     Tagged.load_forwarding_pointer env ^^
-    Tagged.load_field env len_field
+    Tagged.load_field env (len_field env)
 
   let len_nat env =
     Func.share_code1 Func.Never env "blob_len" ("text", I32Type) [I32Type] (fun env get ->
@@ -3854,6 +4253,18 @@ module Blob = struct
       BigNum.from_word32 env
     )
 
+  let vanilla_lit env sort s =
+    Tagged.shared_static_obj env Tagged.(Blob sort) StaticBytes.[
+      I32 (Int32.of_int (String.length s));
+      Bytes s;
+    ]
+
+  let lit env sort s = compile_unboxed_const (vanilla_lit env sort s)
+
+  let lit_ptr_len env s =
+    compile_unboxed_const (Int32.add ptr_unskew (E.add_static env StaticBytes.[Bytes s])) ^^
+    compile_unboxed_const (Int32.of_int (String.length s))
+
   let alloc env sort len =
     compile_unboxed_const Tagged.(int_of_tag (Blob sort)) ^^
     len ^^
@@ -3861,41 +4272,18 @@ module Blob = struct
     (* uninitialized blob payload is allowed by the barrier *)
     Tagged.allocation_barrier env
 
-  let unskewed_payload_offset env = Int32.(add ptr_unskew (mul Heap.word_size header_size))
-  
+  let unskewed_payload_offset env = Int32.(add ptr_unskew (mul Heap.word_size (header_size env)))
+
   let payload_ptr_unskewed env =
     Tagged.load_forwarding_pointer env ^^
     compile_add_const (unskewed_payload_offset env)
 
-  let load_data_segment env sort segment_index data_length =
-    let (set_blob, get_blob) = new_local env "data_segment_blob" in
-    alloc env sort data_length ^^ set_blob ^^
-    get_blob ^^ payload_ptr_unskewed env ^^ (* target address *)
-    compile_unboxed_const 0l ^^ (* data offset *)
-    data_length ^^
-    G.i (MemoryInit (nr segment_index)) ^^
-    get_blob
-
-  let constant env sort payload =
-    Tagged.shared_object env (fun env -> 
-      let blob_length = Int32.of_int (String.length payload) in
-      let segment_index = E.add_static env StaticBytes.[Bytes payload] in
-      load_data_segment env sort segment_index (compile_unboxed_const blob_length)
-    )
-
-  let lit env sort payload =
-    Tagged.materialize_shared_value env (constant env sort payload)
-
   let as_ptr_len env = Func.share_code1 Func.Never env "as_ptr_size" ("x", I32Type) [I32Type; I32Type] (
     fun env get_x ->
       get_x ^^ payload_ptr_unskewed env ^^
       get_x ^^ len env
     )
 
-  let lit_ptr_len env sort s =
-    lit env sort s ^^
-    as_ptr_len env
-  
   let of_ptr_size env = Func.share_code2 Func.Always env "blob_of_ptr_size" (("ptr", I32Type), ("size" , I32Type)) [I32Type] (
     fun env get_ptr get_size ->
       let (set_x, get_x) = new_local env "x" in
@@ -3998,14 +4386,14 @@ module Blob = struct
           payload_ptr_unskewed env ^^
           get_i ^^
           G.i (Binary (Wasm.Values.I32 I32Op.Add)) ^^
-          G.i (Load {ty = I32Type; align = 0; offset = 0l; sz = Some Wasm.Types.(Pack8, ZX)}) ^^
+          G.i (Load {ty = I32Type; align = 0; offset = 0L; sz = Some Wasm.Types.(Pack8, ZX)}) ^^
           set_a ^^
 
           get_y ^^
           payload_ptr_unskewed env ^^
           get_i ^^
           G.i (Binary (Wasm.Values.I32 I32Op.Add)) ^^
-          G.i (Load {ty = I32Type; align = 0; offset = 0l; sz = Some Wasm.Types.(Pack8, ZX)}) ^^
+          G.i (Load {ty = I32Type; align = 0; offset = 0L; sz = Some Wasm.Types.(Pack8, ZX)}) ^^
           set_b ^^
 
           get_a ^^ get_b ^^ G.i (Compare (Wasm.Values.I32 I32Op.Eq)) ^^
@@ -4050,215 +4438,31 @@ module Blob = struct
     set_len ^^
     alloc env Tagged.B get_len ^^ payload_ptr_unskewed env
 
-end (* Blob *)
-
-module Object = struct
-  (* An object with a mutable field1 and immutable field 2 has the following
-     heap layout:
- 
-     ┌──────┬─────┬──────────┬─────────┬─────────────┬───┐
-     │ obj header │ hash_ptr │ ind_ptr │ field2_data │ … │
-     └──────┴─────┴┬─────────┴┬────────┴─────────────┴───┘
-          ┌────────┘          │
-          │   ┌───────────────┘
-          │   ↓
-          │  ╶─┬────────┬─────────────┐
-          │    │ MutBox │ field1_data │
-          ↓    └────────┴─────────────┘
-          ┌─────────────┬─────────────┬─────────────┬───┐
-          │ blob header │ field1_hash │ field2_hash │ … │
-          └─────────────┴─────────────┴─────────────┴───┘        
- 
-     The object header includes the object tag (Object) and the forwarding pointer.
-     The size of the object (number of fields) can be derived from the hash blob via `hash_ptr`.
-
-     The field hashes reside in a blob inside the dynamic heap.
-     The hash blob needs to be tracked by the GC, but not the content of the hash blob.
-     This is because the hash values are plain numbers that would look like skewed pointers.ters.
-     The hash_ptr is skewed.
- 
-     The field2_data for immutable fields is a vanilla word.
- 
-     The field1_data for mutable fields are pointers to a MutBox. This indirection 
-     is a consequence of how we compile object literals with `await` instructions, 
-     as these mutable fields need to be able to alias local mutable variables, e.g.
-     `{ public let f = 1; await async (); public let var v = 2}`.
-     Other use cases are object constructors with public and private mutable fields, 
-     where the physical record only wraps the public fields.
-     Moreover, closures can selectively capture the individual fields instead of 
-     the containing object.
-     Finally, classical Candid stabilization/destabilization also relies on the 
-     indirection of mutable fields, to reserve and store alias information in those 
-     locations.
- 
-     We could alternatively switch to an allocate-first approach in the
-     await-translation of objects, and get rid of this indirection -- if it were
-     not for the implementing of sharing of mutable stable values.
-   *)
- 
-  let header_size = Int32.add Tagged.header_size 1l
- 
-  let hash_ptr_field = Int32.add Tagged.header_size 0l
- 
-  module FieldEnv = Env.Make(String)
-
-  (* This is for non-recursive objects. *)
-  (* The instructions in the field already create the indirection if needed *)
-  let object_builder env (fs : (string * (E.t -> G.t)) list ) =
-    let name_pos_map =
-      fs |>
-        (* We could store only public fields in the object, but
-          then we need to allocate separate boxes for the non-public ones:
-          List.filter (fun (_, vis, f) -> vis.it = Public) |>
-        *)
-        List.map (fun (n,_) -> (E.hash env n, n)) |>
-        List.sort compare |>
-        List.mapi (fun i (_h,n) -> (n,Int32.of_int i)) |>
-        List.fold_left (fun m (n,i) -> FieldEnv.add n i m) FieldEnv.empty in
-
-      let sz = Int32.of_int (FieldEnv.cardinal name_pos_map) in
-
-      (* Create hash blob *)
-      let hashes = fs |>
-        List.map (fun (n,_) -> E.hash env n) |>
-        List.sort compare in
-      let hash_blob =
-        let hash_payload = StaticBytes.[ i32s hashes ] in
-        Blob.constant env Tagged.B (StaticBytes.as_bytes hash_payload)
-      in
-      
-      (fun env -> 
-        (* Allocate memory *)
-        let (set_ri, get_ri, ri) = new_local_ env I32Type "obj" in
-        Tagged.alloc env (Int32.add header_size sz) Tagged.Object ^^
-        set_ri ^^
-
-        (* Set hash_ptr *)
-        get_ri ^^
-        Tagged.materialize_shared_value env hash_blob ^^
-        Tagged.store_field env hash_ptr_field ^^
-
-        (* Write all the fields *)
-        let init_field (name, generate_value) : G.t =
-          (* Write the pointer to the indirection *)
-          get_ri ^^
-          generate_value env ^^
-          let i = FieldEnv.find name name_pos_map in
-          let offset = Int32.add header_size i in
-          Tagged.store_field env offset
-        in
-        G.concat_map init_field fs ^^
-
-        (* Return the pointer to the object *)
-        get_ri ^^
-        Tagged.allocation_barrier env
-      )
-
-  let constant env (fs : (string * E.shared_value) list) =
-    let materialize_fields = List.map (fun (name, value) -> (name, fun env -> Tagged.materialize_shared_value env value)) fs in
-    let allocation = object_builder env materialize_fields in
-    Tagged.shared_object env allocation
-
-  (* This is for non-recursive objects, i.e. ObjNewE *)
-  (* The instructions in the field already create the indirection if needed *)
-  let lit_raw env (fs : (string * (unit -> G.t)) list ) =
-    let materialize_fields = List.map (fun (name, generate_value) -> (name, (fun env -> generate_value ()))) fs in
-    let allocation = object_builder env materialize_fields in
-    allocation env
-
-  (* Reflection used by orthogonal persistence: 
-     Check whether an (actor) object contains a specific field *)
-  let contains_field env field =
-    compile_unboxed_const (E.hash env field) ^^
-    E.call_import env "rts" "contains_field"
- 
-  (* Returns a pointer to the object field (without following the field indirection) *)
-  let idx_hash_raw env low_bound =
-    let name = Printf.sprintf "obj_idx<%d>" low_bound  in
-    Func.share_code2 Func.Always env name (("x", I32Type), ("hash", I32Type)) [I32Type] (fun env get_x get_hash ->
-      let set_x = G.setter_for get_x in
-      let set_h_ptr, get_h_ptr = new_local env "h_ptr" in
-
-      get_x ^^ Tagged.load_forwarding_pointer env ^^ set_x ^^
-
-      get_x ^^ Tagged.load_field env hash_ptr_field ^^
-      Blob.payload_ptr_unskewed env ^^
-
-      (* Linearly scan through the fields (binary search can come later) *)
-      (* unskew h_ptr and advance both to low bound *)
-      compile_add_const Int32.(mul Heap.word_size (of_int low_bound)) ^^
-      set_h_ptr ^^
+  (* TODO: rewrite using MemoryFill *)
+  let clear env =
+    Func.share_code1 Func.Always env "blob_clear" ("x", I32Type) [] (fun env get_x ->
+      let (set_ptr, get_ptr) = new_local env "ptr" in
+      let (set_len, get_len) = new_local env "len" in
       get_x ^^
-      compile_add_const Int32.(mul Heap.word_size (add header_size (of_int low_bound))) ^^
-      set_x ^^
-      G.loop0 (
-          get_h_ptr ^^ load_unskewed_ptr ^^
-          get_hash ^^ G.i (Compare (Wasm.Values.I32 I32Op.Eq)) ^^
-          G.if0
-            (get_x ^^ G.i Return)
-            (get_h_ptr ^^ compile_add_const Heap.word_size ^^ set_h_ptr ^^
-            get_x ^^ compile_add_const Heap.word_size ^^ set_x ^^
-            G.i (Br (nr 1l)))
-        ) ^^
-      G.i Unreachable
-    )
-
-  (* Returns a pointer to the object field (possibly following the indirection) *)
-  let idx_hash env low_bound indirect =
-    if indirect
-    then
-      let name = Printf.sprintf "obj_idx_ind<%d>" low_bound in
-      Func.share_code2 Func.Never env name (("x", I32Type), ("hash", I32Type)) [I32Type] (fun env get_x get_hash ->
-      get_x ^^ get_hash ^^
-      idx_hash_raw env low_bound ^^
-      load_ptr ^^ Tagged.load_forwarding_pointer env ^^
-      compile_add_const (Int32.mul MutBox.field Heap.word_size)
-    )
-    else idx_hash_raw env low_bound
-
-  let field_type env obj_type s =
-    let _, fields = Type.as_obj_sub [s] obj_type in
-    Type.lookup_val_field s fields
-
-  (* Determines whether the field is mutable (and thus needs an indirection) *)
-  let is_mut_field env obj_type s =
-    let _, fields = Type.as_obj_sub [s] obj_type in
-    Type.is_mut (Type.lookup_val_field s fields)
-
-  (* Computes a lower bound for the positional index of a field in an object *)
-  let field_lower_bound env obj_type s =
-    let open Type in
-    let _, fields = as_obj_sub [s] obj_type in
-    List.iter (function {typ = Typ _; _} -> assert false | _ -> ()) fields;
-    let sorted_by_hash =
-      List.sort
-        (fun (h1, _) (h2, _) -> Lib.Uint32.compare h1 h2)
-        (List.map (fun f -> Lib.Uint32.of_int32 (E.hash env f.lab), f) fields) in
-    match Lib.List.index_of s (List.map (fun (_, {lab; _}) -> lab) sorted_by_hash) with
-    | Some i -> i
-    | _ -> assert false
-
-  (* Returns a pointer to the object field (without following the indirection) *)
-  let idx_raw env f =
-    compile_unboxed_const (E.hash env f) ^^
-    idx_hash_raw env 0
+      as_ptr_len env ^^
+      set_len ^^
+      set_ptr ^^
 
-  (* Returns a pointer to the object field (possibly following the indirection) *)
-  let idx env obj_type f =
-    compile_unboxed_const (E.hash env f) ^^
-    idx_hash env (field_lower_bound env obj_type f) (is_mut_field env obj_type f)
+      (* round to word size *)
+      get_len ^^
+      compile_add_const (Int32.sub Heap.word_size 1l) ^^
+      compile_divU_const Heap.word_size ^^
 
-  (* load the value (or the mutbox) *)
-  let load_idx_raw env f =
-    idx_raw env f ^^
-    load_ptr
+      (* clear all words *)
+      from_0_to_n env (fun get_i ->
+        get_ptr ^^
+        compile_unboxed_const 0l ^^
+        store_unskewed_ptr ^^
+        get_ptr ^^
+        compile_add_const Heap.word_size ^^
+        set_ptr))
 
-  (* load the actual value (dereferencing the mutbox) *)
-  let load_idx env obj_type f =
-    idx env obj_type f ^^
-    load_ptr
- 
-end (* Object *) 
+end (* Blob *)
 
 module Region = struct
   (*
@@ -4360,6 +4564,7 @@ module Text = struct
      └──────┴─────┴─────────┴───────┴───────┘
 
     The object header includes the object tag (TAG_CONCAT defined in rts/types.rs) and the forwarding pointer
+    The forwarding pointer is only reserved if compiled for the incremental GC.
 
     This is internal to rts/text.c, with the exception of GC-related code.
   *)
@@ -4392,7 +4597,8 @@ module Text = struct
     get_blob ^^ Blob.as_ptr_len env ^^
     E.call_import env "rts" "utf8_valid" ^^
     G.if1 I32Type
-      (get_blob ^^ Blob.copy env Tagged.B Tagged.T ^^
+      (get_blob ^^ Blob.as_ptr_len env ^^
+       of_ptr_size env ^^ (* creates text blob *)
        set_blob ^^
        Opt.inject_simple env get_blob)
       (Opt.null_lit env)
@@ -4439,32 +4645,33 @@ module Arr = struct
      └──────┴─────┴──────────┴────────┴───┘
 
      The object  header includes the object tag (Array) and the forwarding pointer.
+     The forwarding pointer is only reserved if compiled for the incremental GC.
 
      No difference between mutable and immutable arrays.
   *)
 
   (* NB max_array_size must agree with limit 2^29 imposed by RTS constants.MAX_ARRAY_SIZE *)
-  let max_array_size = Int32.shift_left 1l 29 (* inclusive *)
+  let max_array_size env = Int32.shift_left 1l 29 (* inclusive *)
 
-  let header_size = Int32.add Tagged.header_size 1l
+  let header_size env = Int32.add (Tagged.header_size env) 1l
   let element_size = 4l
-  let len_field = Int32.add Tagged.header_size 0l
+  let len_field env = Int32.add (Tagged.header_size env) 0l
 
   let len env =
     Tagged.load_forwarding_pointer env ^^
-    Tagged.load_field env len_field
+    Tagged.load_field env (len_field env)
 
   (* Static array access. No checking *)
   let load_field env n =
     Tagged.load_forwarding_pointer env ^^
-    Tagged.load_field env Int32.(add n header_size)
+    Tagged.load_field env Int32.(add n (header_size env))
 
   (* Dynamic array access. Returns the address (not the value) of the field.
      Does no bounds checking *)
   let unsafe_idx env =
     Func.share_code2 Func.Never env "Array.unsafe_idx" (("array", I32Type), ("idx", I32Type)) [I32Type] (fun env get_array get_idx ->
       get_idx ^^
-      compile_add_const header_size ^^
+      compile_add_const (header_size env) ^^
       compile_mul_const element_size ^^
       get_array ^^
       Tagged.load_forwarding_pointer env ^^
@@ -4483,7 +4690,7 @@ module Arr = struct
       E.else_trap_with env "Array index out of bounds" ^^
 
       get_idx ^^
-      compile_add_const header_size ^^
+      compile_add_const (header_size env) ^^
       compile_mul_const element_size ^^
       get_array ^^
       Tagged.load_forwarding_pointer env ^^
@@ -4495,7 +4702,8 @@ module Arr = struct
     Func.share_code2 Func.Never env "Array.idx_bigint" (("array", I32Type), ("idx", I32Type)) [I32Type] (fun env get_array get_idx ->
       get_array ^^
       get_idx ^^
-      BigNum.to_word32_with env (Blob.lit env Tagged.T "Array index out of bounds") ^^
+      Blob.lit env Tagged.T "Array index out of bounds" ^^
+      BigNum.to_word32_with env ^^
       idx env
   )
 
@@ -4503,18 +4711,18 @@ module Arr = struct
      | Type.Array element_type -> element_type
      | _ -> assert false
 
+  let vanilla_lit env sort ptrs =
+    Tagged.shared_static_obj env Tagged.(Array sort) StaticBytes.[
+      I32 (Int32.of_int (List.length ptrs));
+      i32s ptrs;
+    ]
+
   (* Compile an array literal. *)
   let lit env sort element_instructions =
     Tagged.obj env Tagged.(Array sort)
      ([ compile_unboxed_const (Wasm.I32.of_int_u (List.length element_instructions))
       ] @ element_instructions)
 
-  let constant env sort elements =
-    Tagged.shared_object env (fun env ->
-      let materialized_elements = List.map (fun element -> Tagged.materialize_shared_value env element) elements in
-      lit env sort materialized_elements
-    )
-
   (* Does not initialize the fields! *)
   (* Note: Post allocation barrier must be applied after initialization *)
   let alloc env array_sort len =
@@ -4530,7 +4738,7 @@ module Arr = struct
     get_array ^^ Tagged.load_forwarding_pointer env ^^ set_array ^^
 
     (* Initial element pointer, skewed *)
-    compile_unboxed_const header_size ^^
+    compile_unboxed_const (header_size env) ^^
     compile_mul_const element_size ^^
     get_array ^^
     G.i (Binary (Wasm.Values.I32 I32Op.Add)) ^^
@@ -4538,7 +4746,7 @@ module Arr = struct
 
     (* Upper pointer boundary, skewed *)
     get_array ^^
-    Tagged.load_field env len_field ^^
+    Tagged.load_field env (len_field env) ^^
     compile_mul_const element_size ^^
     get_pointer ^^
     G.i (Binary (Wasm.Values.I32 I32Op.Add)) ^^
@@ -4634,7 +4842,7 @@ module Arr = struct
         get_r ^^ get_i ^^ unsafe_idx env ^^
         get_blob ^^ Blob.payload_ptr_unskewed env ^^
         get_i ^^ G.i (Binary (Wasm.Values.I32 I32Op.Add)) ^^
-        G.i (Load {ty = I32Type; align = 0; offset = 0l; sz = Some Wasm.Types.(Pack8, ZX)}) ^^
+        G.i (Load {ty = I32Type; align = 0; offset = 0L; sz = Some Wasm.Types.(Pack8, ZX)}) ^^
         TaggedSmallWord.msb_adjust Type.Nat8 ^^
         TaggedSmallWord.tag env Type.Nat8 ^^
         store_ptr
@@ -4658,7 +4866,7 @@ module Arr = struct
         get_a ^^ get_i ^^ unsafe_idx env ^^
         load_ptr ^^
         TaggedSmallWord.lsb_adjust Type.Nat8 ^^
-        G.i (Store {ty = I32Type; align = 0; offset = 0l; sz = Some Wasm.Types.Pack8})
+        G.i (Store {ty = I32Type; align = 0; offset = 0L; sz = Some Wasm.Types.Pack8})
       ) ^^
 
       get_r
@@ -4683,7 +4891,7 @@ module Tuple = struct
   let load_n env n =
     Tagged.load_forwarding_pointer env ^^
     Tagged.(sanity_check_tag __LINE__ env (Array T)) ^^
-    Tagged.load_field env (Int32.add Arr.header_size n)
+    Tagged.load_field env (Int32.add (Arr.header_size env) n)
 
   (* Takes n elements of the stack and produces an argument tuple *)
   let from_stack env n =
@@ -4839,6 +5047,11 @@ module IC = struct
   let i32s n = Lib.List.make n I32Type
   let i64s n = Lib.List.make n I64Type
 
+  let get_actor_to_persist_function_name = "@get_actor_to_persist"
+
+  let get_actor_to_persist env =
+    G.i (Call (nr (E.built_in env get_actor_to_persist_function_name)))
+
   let import_ic0 env =
       E.add_func_import env "ic0" "accept_message" [] [];
       E.add_func_import env "ic0" "call_data_append" (i32s 2) [];
@@ -4912,23 +5125,23 @@ module IC = struct
             (* We use the iovec functionality to append a newline *)
             get_iovec_ptr ^^
             get_ptr ^^
-            G.i (Store {ty = I32Type; align = 2; offset = 0l; sz = None}) ^^
+            G.i (Store {ty = I32Type; align = 2; offset = 0L; sz = None}) ^^
 
             get_iovec_ptr ^^
             get_len ^^
-            G.i (Store {ty = I32Type; align = 2; offset = 4l; sz = None}) ^^
+            G.i (Store {ty = I32Type; align = 2; offset = 4L; sz = None}) ^^
 
             get_iovec_ptr ^^
             get_iovec_ptr ^^ compile_add_const 16l ^^
-            G.i (Store {ty = I32Type; align = 2; offset = 8l; sz = None}) ^^
+            G.i (Store {ty = I32Type; align = 2; offset = 8L; sz = None}) ^^
 
             get_iovec_ptr ^^
             compile_unboxed_const 1l ^^
-            G.i (Store {ty = I32Type; align = 2; offset = 12l; sz = None}) ^^
+            G.i (Store {ty = I32Type; align = 2; offset = 12L; sz = None}) ^^
 
             get_iovec_ptr ^^
             compile_unboxed_const (Int32.of_int (Char.code '\n')) ^^
-            G.i (Store {ty = I32Type; align = 0; offset = 16l; sz = Some Wasm.Types.Pack8}) ^^
+            G.i (Store {ty = I32Type; align = 0; offset = 16L; sz = Some Wasm.Types.Pack8}) ^^
 
             (* Call fd_write twice to work around
                https://github.com/bytecodealliance/wasmtime/issues/629
@@ -4979,7 +5192,7 @@ module IC = struct
 
   (* For debugging *)
   let _compile_static_print env s =
-    Blob.lit_ptr_len env Tagged.T s ^^ print_ptr_len env
+    Blob.lit_ptr_len env s ^^ print_ptr_len env
 
   let ic_trap env = system_call env "trap"
 
@@ -4990,7 +5203,7 @@ module IC = struct
     | Flags.ICMode | Flags.RefMode -> ic_trap env ^^ G.i Unreachable
 
   let trap_with env s =
-    Blob.lit_ptr_len env Tagged.T s ^^ trap_ptr_len env
+    Blob.lit_ptr_len env s ^^ trap_ptr_len env
 
   let trap_text env  =
     Text.to_blob env ^^ Blob.as_ptr_len env ^^ trap_ptr_len env
@@ -5349,18 +5562,18 @@ module Cycles = struct
     (fun env get_ptr ->
      let set_lower, get_lower = new_local env "lower" in
      get_ptr ^^
-     G.i (Load {ty = I64Type; align = 0; offset = 0l; sz = None }) ^^
+     G.i (Load {ty = I64Type; align = 0; offset = 0L; sz = None }) ^^
      BigNum.from_word64 env ^^
      set_lower ^^
      get_ptr ^^
-     G.i (Load {ty = I64Type; align = 0; offset = 8l; sz = None }) ^^
+     G.i (Load {ty = I64Type; align = 0; offset = 8L; sz = None }) ^^
      G.i (Test (Wasm.Values.I64 I64Op.Eqz)) ^^
      G.if1 I32Type
        get_lower
        begin
          get_lower ^^
          get_ptr ^^
-         G.i (Load {ty = I64Type; align = 0; offset = 8l; sz = None }) ^^
+         G.i (Load {ty = I64Type; align = 0; offset = 8L; sz = None }) ^^
          BigNum.from_word64 env ^^
          (* shift left 64 bits *)
          compile_unboxed_const 64l ^^
@@ -5374,7 +5587,7 @@ module Cycles = struct
     let (set_val, get_val) = new_local env "cycles" in
     set_val ^^
     get_val ^^
-    Tagged.materialize_shared_value env (BigNum.constant env (Big_int.power_int_positive_int 2 128)) ^^
+    compile_unboxed_const (BigNum.vanilla_lit env (Big_int.power_int_positive_int 2 128)) ^^
     BigNum.compile_relop env Lt ^^
     E.else_trap_with env "cycles out of bounds" ^^
 
@@ -5526,17 +5739,15 @@ module StableMem = struct
 
   (* Versioning (c.f. Region.rs) *)
   (* NB: these constants must agree with VERSION_NO_STABLE_MEMORY etc. in Region.rs *)
-  let legacy_version_no_stable_memory = Int32.of_int 0 (* never manifest in serialized form *)
-  let legacy_version_some_stable_memory = Int32.of_int 1
-  let legacy_version_regions = Int32.of_int 2
-  let version_stable_heap_no_regions = Int32.of_int 3
-  let version_stable_heap_regions = Int32.of_int 4
-  let version_max = version_stable_heap_regions
+  let version_no_stable_memory = Int32.of_int 0 (* never manifest in serialized form *)
+  let version_some_stable_memory = Int32.of_int 1
+  let version_regions = Int32.of_int 2
+  let version_max = version_regions
 
   let register_globals env =
     (* size (in pages) *)
     E.add_global64 env "__stablemem_size" Mutable 0L;
-    E.add_global32 env "__stablemem_version" Mutable version_stable_heap_no_regions
+    E.add_global32 env "__stablemem_version" Mutable version_no_stable_memory
 
   let get_mem_size env =
     G.i (GlobalGet (nr (E.get_global env "__stablemem_size")))
@@ -5550,10 +5761,6 @@ module StableMem = struct
   let set_version env =
     G.i (GlobalSet (nr (E.get_global env "__stablemem_version")))
 
-  let region_init env =
-    compile_unboxed_const (if !Flags.use_stable_regions then 1l else 0l) ^^
-    E.call_import env "rts" "region_init"
-
   (* stable memory bounds check *)
   let guard env =
        get_mem_size env ^^
@@ -5630,18 +5837,36 @@ module StableMem = struct
             compile_const_64 (Int64.of_int32 bytes) ^^
             stable64_write env))
 
+  let _read_word32 env =
+    read env false "word32" I32Type 4l load_unskewed_ptr
   let write_word32 env =
     write env false "word32" I32Type 4l store_unskewed_ptr
 
-  let write_word64 env =
-    write env false "word64" I64Type 8l store_unskewed_ptr64
 
-  let read_word32 env =
-    read env false "word32" I32Type 4l load_unskewed_ptr
-  
-  let read_word64 env =
-    read env false "word64" I64Type 8l load_unskewed_ptr64
-  
+  (* read and clear word32 from stable mem offset on stack *)
+  let read_and_clear_word32 env =
+      Func.share_code1 Func.Always env "__stablemem_read_and_clear_word32"
+        ("offset", I64Type) [I32Type]
+        (fun env get_offset ->
+          Stack.with_words env "temp_ptr" 1l (fun get_temp_ptr ->
+            let (set_word, get_word) = new_local env "word" in
+            (* read word *)
+            get_temp_ptr ^^ G.i (Convert (Wasm.Values.I64 I64Op.ExtendUI32)) ^^
+            get_offset ^^
+            compile_const_64 4L ^^
+            stable64_read env ^^
+            get_temp_ptr ^^ load_unskewed_ptr ^^
+            set_word ^^
+            (* write 0 *)
+            get_temp_ptr ^^ compile_unboxed_const 0l ^^ store_unskewed_ptr ^^
+            get_offset ^^
+            get_temp_ptr ^^ G.i (Convert (Wasm.Values.I64 I64Op.ExtendUI32)) ^^
+            compile_const_64 4L ^^
+            stable64_write env ^^
+            (* return word *)
+            get_word
+        ))
+
   (* ensure_pages : ensure at least num pages allocated,
      growing (real) stable memory if needed *)
   let ensure_pages env =
@@ -5667,7 +5892,7 @@ module StableMem = struct
              stable64_grow env)
             get_size)
 
-      (* ensure stable memory includes [offset..offset+size), assumes size > 0 *)
+  (* ensure stable memory includes [offset..offset+size), assumes size > 0 *)
   let ensure env =
       Func.share_code2 Func.Always env "__stablemem_ensure"
         (("offset", I64Type), ("size", I64Type)) []
@@ -5746,29 +5971,29 @@ module StableMem = struct
 
   let load_word8 env =
     read env true "word8" I32Type 1l
-      (G.i (Load {ty = I32Type; align = 0; offset = 0l; sz = Some Wasm.Types.(Pack8, ZX)}))
+      (G.i (Load {ty = I32Type; align = 0; offset = 0L; sz = Some Wasm.Types.(Pack8, ZX)}))
   let store_word8 env =
     write env true "word8" I32Type 1l store_unskewed_ptr
 
   let load_word16 env =
     read env true "word16" I32Type 2l
-      (G.i (Load {ty = I32Type; align = 0; offset = 0l; sz = Some Wasm.Types.(Pack16, ZX)}))
+      (G.i (Load {ty = I32Type; align = 0; offset = 0L; sz = Some Wasm.Types.(Pack16, ZX)}))
   let store_word16 env =
     write env true "word16" I32Type 2l store_unskewed_ptr
 
   let load_word64 env =
     read env true "word64" I64Type 8l
-      (G.i (Load {ty = I64Type; align = 0; offset = 0l; sz = None }))
+      (G.i (Load {ty = I64Type; align = 0; offset = 0L; sz = None }))
   let store_word64 env =
     write env true "word64" I64Type 8l
-      (G.i (Store {ty = I64Type; align = 0; offset = 0l; sz = None}))
+      (G.i (Store {ty = I64Type; align = 0; offset = 0L; sz = None}))
 
   let load_float64 env =
     read env true "float64" F64Type 8l
-      (G.i (Load {ty = F64Type; align = 0; offset = 0l; sz = None }))
+      (G.i (Load {ty = F64Type; align = 0; offset = 0L; sz = None }))
   let store_float64 env =
     write env true "float64" F64Type 8l
-      (G.i (Store {ty = F64Type; align = 0; offset = 0l; sz = None}))
+      (G.i (Store {ty = F64Type; align = 0; offset = 0L; sz = None}))
 
   let load_blob env =
       Func.share_code2 Func.Always env "__stablemem_load_blob"
@@ -5806,9 +6031,10 @@ end (* StableMem *)
 (* Core, legacy interface to IC stable memory, used to implement prims `stableMemoryXXX` of
    library `ExperimentalStableMemory.mo`.
    Each operation dispatches on the state of `StableMem.get_version()`.
-   * StableMem.version_stable_heap_no_regions
-     * use StableMem directly.
-   * StableMem.version_stable_heap_regions: use Region.mo
+   * StableMem.version_no_stable_memory/StableMem.version_some_stable_memory:
+     * use StableMem directly
+     * switch to version_some_stable_memory on non-trivial grow.
+   * StableMem.version_regions: use Region.mo
 *)
 module StableMemoryInterface = struct
 
@@ -5817,7 +6043,7 @@ module StableMemoryInterface = struct
 
   let if_regions env args tys is1 is2 =
     StableMem.get_version env ^^
-    compile_unboxed_const StableMem.version_stable_heap_regions ^^
+    compile_unboxed_const StableMem.version_regions ^^
     G.i (Compare (Wasm.Values.I32 I32Op.Eq)) ^^
     E.if_ env tys
       (get_region0 env ^^ args ^^ is1 env)
@@ -5843,8 +6069,27 @@ module StableMemoryInterface = struct
           [I64Type]
           Region.grow
           (fun env ->
+            (* do StableMem.grow, but detect and record change in version as well *)
+            let (set_res, get_res) = new_local64 env "size" in
             (* logical grow *)
-            StableMem.grow env))
+            StableMem.grow env ^^
+            set_res ^^
+            (* if version = version_no_stable_memory and new mem_size > 0
+               then version := version_some_stable_memory *)
+            StableMem.get_version env ^^
+            compile_eq_const StableMem.version_no_stable_memory ^^
+            StableMem.get_mem_size env ^^
+            compile_const_64 0L ^^
+            G.i (Compare (Wasm.Values.I64 I32Op.GtU)) ^^
+            G.i (Binary (Wasm.Values.I32 I32Op.And)) ^^
+            (G.if0
+               begin
+                 compile_unboxed_const StableMem.version_some_stable_memory ^^
+                 StableMem.set_version env
+               end
+               G.nop) ^^
+            (* return res *)
+            get_res))
 
   let load_blob env =
     E.require_stable_memory env;
@@ -5972,23 +6217,6 @@ module StableMemoryInterface = struct
 
 end
 
-module UpgradeStatistics = struct
-  let get_upgrade_instructions env =
-    E.call_import env "rts" "get_upgrade_instructions"
-  let set_upgrade_instructions env =
-    E.call_import env "rts" "set_upgrade_instructions"
-
-  let add_instructions env =
-    get_upgrade_instructions env ^^
-    GC.instruction_counter env ^^
-    G.i (Binary (Wasm.Values.I64 I64Op.Add)) ^^
-    set_upgrade_instructions env
-
-  let set_instructions env =
-    GC.instruction_counter env ^^
-    set_upgrade_instructions env
-end
-
 module RTS_Exports = struct
   (* Must be called late, after main codegen, to ensure correct generation of
      of functioning or unused-but-trapping stable memory exports (as required)
@@ -6048,6 +6276,34 @@ module RTS_Exports = struct
       edesc = nr (FuncExport (nr keep_memory_reserve_fi))
     });
 
+    if !Flags.gc_strategy <> Flags.Incremental then
+    begin
+      let set_hp_fi =
+        E.add_fun env "__set_hp" (
+        Func.of_body env ["new_hp", I32Type] [] (fun env ->
+          G.i (LocalGet (nr 0l)) ^^
+          GC.set_heap_pointer env
+        )
+      ) in
+      E.add_export env (nr {
+        name = Lib.Utf8.decode "setHP";
+        edesc = nr (FuncExport (nr set_hp_fi))
+      });
+
+      let get_hp_fi = E.add_fun env "__get_hp" (
+        Func.of_body env [] [I32Type] (fun env ->
+          GC.get_heap_pointer env
+        )
+      ) in
+      E.add_export env (nr {
+        name = Lib.Utf8.decode "getHP";
+        edesc = nr (FuncExport (nr get_hp_fi))
+      })
+    end;
+
+
+    (* Stable Memory related exports *)
+
     let when_stable_memory_required_else_trap env code =
       if E.requires_stable_memory env then
         code() else
@@ -6099,27 +6355,6 @@ module RTS_Exports = struct
       edesc = nr (FuncExport (nr ic0_stable64_read_fi))
     });
 
-    let ic0_stable64_size_fi =
-      match E.mode env with
-      | Flags.ICMode | Flags.RefMode ->
-        E.reuse_import env "ic0" "stable64_size"
-      | Flags.WASIMode | Flags.WasmMode ->
-        E.add_fun env "ic0_stable64_size" (
-          Func.of_body env [] [I64Type]
-            (fun env ->
-              if E.requires_stable_memory env then
-                StableMem.stable64_size env
-              else
-                (* The RTS also checks the persistence mode on Wasi without stable memory support *)
-                compile_const_64 0L
-            )
-          )
-    in
-    E.add_export env (nr {
-      name = Lib.Utf8.decode "ic0_stable64_size";
-      edesc = nr (FuncExport (nr ic0_stable64_size_fi))
-    });
-
     let moc_stable_mem_grow_fi =
       E.add_fun env "moc_stable_mem_grow" (
         Func.of_body env ["newPages", I64Type] [I64Type]
@@ -6134,8 +6369,8 @@ module RTS_Exports = struct
       edesc = nr (FuncExport (nr moc_stable_mem_grow_fi))
     });
 
-    let moc_stable_mem_size_fi =
-      E.add_fun env "moc_stable_mem_size" (
+    let moc_stable_mem_get_size_fi =
+      E.add_fun env "moc_stable_mem_get_size" (
         Func.of_body env [] [I64Type]
           (fun env ->
              when_stable_memory_required_else_trap env (fun () ->
@@ -6143,8 +6378,8 @@ module RTS_Exports = struct
         )
     in
     E.add_export env (nr {
-      name = Lib.Utf8.decode "moc_stable_mem_size";
-      edesc = nr (FuncExport (nr moc_stable_mem_size_fi))
+      name = Lib.Utf8.decode "moc_stable_mem_get_size";
+      edesc = nr (FuncExport (nr moc_stable_mem_get_size_fi))
     });
 
     let moc_stable_mem_get_version_fi =
@@ -6180,161 +6415,191 @@ module RTS_Exports = struct
 
 end (* RTS_Exports *)
 
-module Serialization = struct
-  (*
-    The general serialization strategy is as follows:
-    * We statically generate the IDL type description header.
-    * We traverse the data to calculate the size needed for the data buffer and the
-      reference buffer.
-    * We allocate memory for the data buffer and the reference buffer
-      (this memory area is not referenced, so will be dead with the next GC)
-    * We copy the IDL type header to the data buffer.
-    * We traverse the data and serialize it into the data buffer.
-      This is type driven, and we use the `share_code` machinery and names that
-      properly encode the type to resolve loops in a convenient way.
-    * We externalize all that new data space into a databuf
-    * We externalize the reference space into a elembuf
-    * We pass both databuf and elembuf to shared functions
-      (this mimicks the future system API)
 
-    The deserialization is analogous:
-    * We allocate some scratch space, and internalize the databuf and elembuf into it.
-    * We parse the data, in a type-driven way, using normal construction and
-      allocation, while keeping tabs on the type description header for subtyping.
-    * At the end, the scratch space is a hole in the heap, and will be reclaimed
-      by the next GC.
-  *)
+(* Below signature is needed by the serialiser to supply the
+   methods various formats and auxiliary routines. A stream
+   token refers to the stream itself. Depending on the stream's
+   methodology, the token can be a (bump) pointer or a handle
+   (like a `Blob`). The former needs to be updated at certain
+   points because the token will normally reside in locals that
+   nested functions won't have access to. *)
+module type Stream = sig
+  (* Bottleneck routines for streaming in different formats.
+     The `code` must be used linearly. `token` is a fragment
+     of Wasm that puts the stream token onto the stack.
+     Arguments:    env    token  code *)
+  val write_byte : E.t -> G.t -> G.t -> G.t
+  val write_word_leb : E.t -> G.t -> G.t -> G.t
+  val write_word_32 : E.t -> G.t -> G.t -> G.t
+  val write_blob : E.t -> G.t -> G.t -> G.t
+  val write_text : E.t -> G.t -> G.t -> G.t
+  val write_bignum_leb : E.t -> G.t -> G.t -> G.t
+  val write_bignum_sleb : E.t -> G.t -> G.t -> G.t
+
+  (* Creates a fresh stream with header, storing stream token.
+     Arguments:env    size   setter getter header *)
+  val create : E.t -> G.t -> G.t -> G.t -> string -> G.t
+
+  (* Checks the stream's filling, traps if unexpected
+     Arguments:      env    token  size *)
+  val check_filled : E.t -> G.t -> G.t -> G.t
+
+  (* Pushes the stream's current absolute byte offset on stack.
+     The requirement is that the difference between two uses
+     of this method must give a correct _relative_ offset.
+     Arguments:         env    token *)
+  val absolute_offset : E.t -> G.t -> G.t
+
+  (* Finishes the stream, performing consistency checks.
+     Leaves two words on stack, whose interpretation depends
+     on the Stream.
+     Arguments:   env    token  size   header_size *)
+  val terminate : E.t -> G.t -> G.t -> int32 -> G.t
+
+  (* Executes code to eliminate the residual buffer
+     that `terminate` returns (if at all) *)
+  val finalize_buffer : G.t -> G.t
+
+  (* Builds a unique name for a name seed and a type *)
+  val name_for : string -> Type.typ list -> string
+
+  (* Opportunity to flush or update the token. Stream token is on stack. *)
+  val checkpoint : E.t -> G.t -> G.t
+
+  (* Reserve a small fixed number of bytes in the stream and return an
+     address to it. The address is invalidated by a GC, and as such must
+     be written to in the next few instructions. *)
+  val reserve : E.t -> G.t -> int32 -> G.t
+end
 
-  module Strm = struct
-    (* Creates a fresh stream with header, storing stream token. *)
-    let create env get_data_size set_data_buf get_data_buf header =
-      let header_size = Int32.of_int (String.length header) in
-      get_data_size ^^ compile_add_const header_size ^^
-      Blob.dyn_alloc_scratch env ^^ set_data_buf ^^
-      get_data_buf ^^
-      Blob.lit env Tagged.B header ^^ Blob.payload_ptr_unskewed env ^^
-      compile_unboxed_const header_size ^^
-      Heap.memcpy env ^^
-      get_data_buf ^^ compile_add_const header_size ^^ set_data_buf
-  
-    (* Checks the stream's filling, traps if unexpected *)
-    let check_filled env get_data_buf get_data_size =
-      get_data_buf ^^ get_data_size ^^ G.i (Binary (Wasm.Values.I32 I32Op.Add)) ^^
-      G.i (Compare (Wasm.Values.I32 I32Op.Eq)) ^^
-      E.else_trap_with env "data buffer not filled"
-  
-    (* Finishes the stream, performing consistency checks. 
-       Returns payload address and size including the header. *)
-    let terminate env get_data_buf get_data_size header_size =
-      get_data_buf ^^ compile_sub_const header_size ^^
-      get_data_size ^^ compile_add_const header_size
-  
-    (* Builds a unique name for a name seed and a type. *)
-    let name_for fn_name ts = "@" ^ fn_name ^ "<" ^ Typ_hash.typ_seq_hash ts ^ ">"
-  
-    let advance_data_buf get_data_buf =
-      get_data_buf ^^ G.i (Binary (Wasm.Values.I32 I32Op.Add)) ^^ G.setter_for get_data_buf
-  
-    (* Pushes the stream's current absolute byte offset on stack.
-       The requirement is that the difference between two uses
-       of this method must give a correct _relative_ offset. *)
-    let absolute_offset _env get_data_buf = get_data_buf
-  
-    (* Opportunity to flush or update the token. Stream token is on stack. *)
-    let checkpoint _env get_data_buf = G.setter_for get_data_buf
-  
-    (* Reserve a small fixed number of bytes in the stream and return an
-       address to it. The address is invalidated by a GC, and as such must
-       be written to in the next few instructions. *)
-    let reserve _env get_data_buf bytes =
-      get_data_buf ^^ get_data_buf ^^ compile_add_const bytes ^^ G.setter_for get_data_buf
-  
-    let write_word_leb env get_data_buf code =
-      let set_word, get_word = new_local env "word" in
-      code ^^ set_word ^^
-      I32Leb.compile_store_to_data_buf_unsigned env get_word get_data_buf ^^
-      advance_data_buf get_data_buf
-  
-    let write_word_32 env get_data_buf code =
-      get_data_buf ^^ code ^^
-      G.i (Store {ty = I32Type; align = 0; offset = 0l; sz = None}) ^^
-      compile_unboxed_const Heap.word_size ^^ advance_data_buf get_data_buf
-  
-    let write_byte _env get_data_buf code =
-      get_data_buf ^^ code ^^
-      G.i (Store {ty = I32Type; align = 0; offset = 0l; sz = Some Wasm.Types.Pack8}) ^^
-      compile_unboxed_const 1l ^^ advance_data_buf get_data_buf
-  
-    let write_blob env get_data_buf get_x =
-      let set_len, get_len = new_local env "len" in
-      get_x ^^ Blob.len env ^^ set_len ^^
-      write_word_leb env get_data_buf get_len ^^
-      get_data_buf ^^
-      get_x ^^ Blob.payload_ptr_unskewed env ^^
-      get_len ^^
-      Heap.memcpy env ^^
-      get_len ^^ advance_data_buf get_data_buf
-  
-    let write_text env get_data_buf get_x =
-      let set_len, get_len = new_local env "len" in
-      get_x ^^ Text.size env ^^ set_len ^^
-      write_word_leb env get_data_buf get_len ^^
-      get_x ^^ get_data_buf ^^ Text.to_buf env ^^
-      get_len ^^ advance_data_buf get_data_buf
-  
-    let write_bignum_leb env get_data_buf get_x =
-      get_data_buf ^^
-      get_x ^^
-      BigNum.compile_store_to_data_buf_unsigned env ^^
-      advance_data_buf get_data_buf
-  
-    let write_bignum_sleb env get_data_buf get_x =
-      get_data_buf ^^
-      get_x ^^
-      BigNum.compile_store_to_data_buf_signed env ^^
-      advance_data_buf get_data_buf
-  end (* Strm *)
 
-  (* Globals recording known Candid types
-    See Note [Candid subtype checks]
+module BumpStream : Stream = struct
+  let create env get_data_size set_data_buf get_data_buf header =
+    let header_size = Int32.of_int (String.length header) in
+    get_data_size ^^ compile_add_const header_size ^^
+    Blob.dyn_alloc_scratch env ^^ set_data_buf ^^
+    get_data_buf ^^
+    Blob.lit env Tagged.B header ^^ Blob.payload_ptr_unskewed env ^^
+    compile_unboxed_const header_size ^^
+    Heap.memcpy env ^^
+    get_data_buf ^^ compile_add_const header_size ^^ set_data_buf
+
+  let check_filled env get_data_buf get_data_size =
+    get_data_buf ^^ get_data_size ^^ G.i (Binary (Wasm.Values.I32 I32Op.Add)) ^^
+    G.i (Compare (Wasm.Values.I32 I32Op.Eq)) ^^
+    E.else_trap_with env "data buffer not filled"
+
+  let terminate env get_data_buf get_data_size header_size =
+    get_data_buf ^^ compile_sub_const header_size ^^
+    get_data_size ^^ compile_add_const header_size
+
+  let finalize_buffer code = code
+
+  let name_for fn_name ts = "@" ^ fn_name ^ "<" ^ Typ_hash.typ_seq_hash ts ^ ">"
+
+  let advance_data_buf get_data_buf =
+    get_data_buf ^^ G.i (Binary (Wasm.Values.I32 I32Op.Add)) ^^ G.setter_for get_data_buf
+
+  let absolute_offset _env get_data_buf = get_data_buf
+
+  let checkpoint _env get_data_buf = G.setter_for get_data_buf
+
+  let reserve _env get_data_buf bytes =
+    get_data_buf ^^ get_data_buf ^^ compile_add_const bytes ^^ G.setter_for get_data_buf
+
+  let write_word_leb env get_data_buf code =
+    let set_word, get_word = new_local env "word" in
+    code ^^ set_word ^^
+    I32Leb.compile_store_to_data_buf_unsigned env get_word get_data_buf ^^
+    advance_data_buf get_data_buf
+
+  let write_word_32 env get_data_buf code =
+    get_data_buf ^^ code ^^
+    G.i (Store {ty = I32Type; align = 0; offset = 0L; sz = None}) ^^
+    compile_unboxed_const Heap.word_size ^^ advance_data_buf get_data_buf
+
+  let write_byte _env get_data_buf code =
+    get_data_buf ^^ code ^^
+    G.i (Store {ty = I32Type; align = 0; offset = 0L; sz = Some Wasm.Types.Pack8}) ^^
+    compile_unboxed_const 1l ^^ advance_data_buf get_data_buf
+
+  let write_blob env get_data_buf get_x =
+    let set_len, get_len = new_local env "len" in
+    get_x ^^ Blob.len env ^^ set_len ^^
+    write_word_leb env get_data_buf get_len ^^
+    get_data_buf ^^
+    get_x ^^ Blob.payload_ptr_unskewed env ^^
+    get_len ^^
+    Heap.memcpy env ^^
+    get_len ^^ advance_data_buf get_data_buf
+
+  let write_text env get_data_buf get_x =
+    let set_len, get_len = new_local env "len" in
+    get_x ^^ Text.size env ^^ set_len ^^
+    write_word_leb env get_data_buf get_len ^^
+    get_x ^^ get_data_buf ^^ Text.to_buf env ^^
+    get_len ^^ advance_data_buf get_data_buf
+
+  let write_bignum_leb env get_data_buf get_x =
+    get_data_buf ^^
+    get_x ^^
+    BigNum.compile_store_to_data_buf_unsigned env ^^
+    advance_data_buf get_data_buf
+
+  let write_bignum_sleb env get_data_buf get_x =
+    get_data_buf ^^
+    get_x ^^
+    BigNum.compile_store_to_data_buf_signed env ^^
+    advance_data_buf get_data_buf
+
+end
+
+module MakeSerialization (Strm : Stream) = struct
+  (*
+    The general serialization strategy is as follows:
+    * We statically generate the IDL type description header.
+    * We traverse the data to calculate the size needed for the data buffer and the
+      reference buffer.
+    * We allocate memory for the data buffer and the reference buffer
+      (this memory area is not referenced, so will be dead with the next GC)
+    * We copy the IDL type header to the data buffer.
+    * We traverse the data and serialize it into the data buffer.
+      This is type driven, and we use the `share_code` machinery and names that
+      properly encode the type to resolve loops in a convenient way.
+    * We externalize all that new data space into a databuf
+    * We externalize the reference space into a elembuf
+    * We pass both databuf and elembuf to shared functions
+      (this mimicks the future system API)
+
+    The deserialization is analogous:
+    * We allocate some scratch space, and internalize the databuf and elembuf into it.
+    * We parse the data, in a type-driven way, using normal construction and
+      allocation, while keeping tabs on the type description header for subtyping.
+    * At the end, the scratch space is a hole in the heap, and will be reclaimed
+      by the next GC.
   *)
-  let register_delayed_globals env =
-    (E.add_global32_delayed env "__candid_data_length" Immutable,
-    E.add_global32_delayed env "__type_offsets_length" Immutable,
-    E.add_global32_delayed env "__idl_types_length" Immutable)
-
-  let get_candid_data_length env =
-    G.i (GlobalGet (nr (E.get_global env "__candid_data_length")))
-  let get_type_offsets_length env =
-    G.i (GlobalGet (nr (E.get_global env "__type_offsets_length")))
-  let get_idl_types_length env =
-    G.i (GlobalGet (nr (E.get_global env "__idl_types_length")))
-
-  let candid_type_offset_size = 4l
-
-  let get_global_type_descriptor env =
-    match !(E.(env.global_type_descriptor)) with
-    | Some descriptor -> descriptor
-    | None -> assert false
 
-  let get_global_candid_data env =
-    Tagged.share env (fun env -> 
-      let descriptor = get_global_type_descriptor env in
-      Blob.load_data_segment env Tagged.B E.(descriptor.candid_data_segment) (get_candid_data_length env)
-    )
+  module Strm = Strm
 
-  let get_global_type_offsets env =
-    Tagged.share env (fun env -> 
-      let descriptor = get_global_type_descriptor env in
-      Blob.load_data_segment env Tagged.B E.(descriptor.type_offsets_segment) (get_type_offsets_length env)
-    )
+  (* Globals recording known Candid types
+     See Note [Candid subtype checks]
+   *)
+
+  let register_delayed_globals env =
+    (E.add_global32_delayed env "__typtbl" Immutable,
+     E.add_global32_delayed env "__typtbl_end" Immutable,
+     E.add_global32_delayed env "__typtbl_size" Immutable,
+     E.add_global32_delayed env "__typtbl_idltyps" Immutable)
+
+  let get_typtbl env =
+    G.i (GlobalGet (nr (E.get_global env "__typtbl")))
+  let get_typtbl_size env =
+    G.i (GlobalGet (nr (E.get_global env "__typtbl_size")))
+  let get_typtbl_end env =
+    G.i (GlobalGet (nr (E.get_global env "__typtbl_end")))
+  let get_typtbl_idltyps env =
+    G.i (GlobalGet (nr (E.get_global env "__typtbl_idltyps")))
 
-  let get_global_idl_types env =
-    Tagged.share env (fun env -> 
-      let descriptor = get_global_type_descriptor env in
-      Blob.load_data_segment env Tagged.B E.(descriptor.idl_types_segment) (get_idl_types_length env)
-    )
-  
   module Registers = struct
 
     (* interval for checking instruction counter *)
@@ -6349,9 +6614,6 @@ module Serialization = struct
       E.add_global32 env "@@typtbl" Mutable 0l;
       E.add_global32 env "@@typtbl_end" Mutable 0l;
       E.add_global32 env "@@typtbl_size" Mutable 0l;
-      E.add_global32 env "@@global_typtbl" Mutable 0l;
-      E.add_global32 env "@@global_typtbl_end" Mutable 0l;
-      E.add_global32 env "@@global_typtbl_size" Mutable 0l;
       E.add_global32 env "@@value_denominator" Mutable idl_value_denominator;
       E.add_global32 env "@@value_numerator" Mutable idl_value_numerator;
       E.add_global32 env "@@value_bias" Mutable idl_value_bias;
@@ -6387,33 +6649,6 @@ module Serialization = struct
     let set_typtbl_size env =
       G.i (GlobalSet (nr (E.get_global env "@@typtbl_size")))
 
-    let get_global_typtbl env =
-      G.i (GlobalGet (nr (E.get_global env "@@global_typtbl")))
-    let set_global_typtbl env =
-      G.i (GlobalSet (nr (E.get_global env "@@global_typtbl")))
-
-    let get_global_typtbl_end env =
-      G.i (GlobalGet (nr (E.get_global env "@@global_typtbl_end")))
-    let set_global_typtbl_end env =
-      G.i (GlobalSet (nr (E.get_global env "@@global_typtbl_end")))
-
-    let get_global_typtbl_size env =
-      G.i (GlobalGet (nr (E.get_global env "@@global_typtbl_size")))
-    let set_global_typtbl_size env =
-      G.i (GlobalSet (nr (E.get_global env "@@global_typtbl_size")))
-
-    (* Used as safety guard that no temporary pointers remain in the registers across GC increments. *)
-    let clear_registers env =
-      compile_unboxed_const 0l ^^ set_rel_buf_opt env ^^
-      compile_unboxed_const 0l ^^ set_data_buf env ^^
-      compile_unboxed_const 0l ^^ set_ref_buf env ^^
-      compile_unboxed_const 0l ^^ set_typtbl env ^^
-      compile_unboxed_const 0l ^^ set_typtbl_end env ^^
-      compile_unboxed_const 0l ^^ set_typtbl_size env ^^ (* also reset for symmetry, even if no pointer *)
-      compile_unboxed_const 0l ^^ set_global_typtbl env ^^
-      compile_unboxed_const 0l ^^ set_global_typtbl_end env ^^
-      compile_unboxed_const 0l ^^ set_global_typtbl_size env (* also reset for symmetry, even if no pointer *)
-
     let get_value_quota env =
       G.i (GlobalGet (nr (E.get_global env "@@value_quota")))
     let set_value_quota env =
@@ -6514,12 +6749,7 @@ module Serialization = struct
   *)
 
   module TM = Map.Make (Type.Ord)
-
-  type mode =
-    | Candid
-    | Persistence
-
-  let to_idl_prim mode = let open Type in function
+  let to_idl_prim = let open Type in function
     | Prim Null | Tup [] -> Some 1l
     | Prim Bool -> Some 2l
     | Prim Nat -> Some 3l
@@ -6539,11 +6769,6 @@ module Serialization = struct
     | Non -> Some 17l
     | Prim Principal -> Some 24l
     | Prim Region -> Some 128l
-    (* only used for memory compatibility checks *)
-    | Prim Blob -> 
-      (match mode with
-      | Candid -> None
-      | Persistence -> Some 129l)
     | _ -> None
 
   (* some constants, also see rts/idl.c *)
@@ -6554,12 +6779,9 @@ module Serialization = struct
   let idl_func      = -22l
   let idl_service   = -23l
   let idl_alias     = 1l (* see Note [mutable stable values] *)
-  
-  (* only used for memory compatibility checks *)
-  let idl_tuple     = -130l
 
   (* TODO: use record *)
-  let type_desc env mode ts :
+  let type_desc env ts :
      string * int list * int32 list  (* type_desc, (relative offsets), indices of ts *)
     =
     let open Type in
@@ -6571,7 +6793,7 @@ module Serialization = struct
       let idx = ref TM.empty in
       let rec go t =
         let t = Type.normalize t in
-        if to_idl_prim mode t <> None then () else
+        if to_idl_prim t <> None then () else
         if TM.mem t !idx then () else begin
           idx := TM.add t (Lib.List32.length !typs) !idx;
           typs := !typs @ [ t ];
@@ -6630,29 +6852,26 @@ module Serialization = struct
 
     let add_idx t =
       let t = Type.normalize t in
-      match to_idl_prim mode t with
+      match to_idl_prim t with
       | Some i -> add_sleb128 (Int32.neg i)
       | None -> add_sleb128 (TM.find (normalize t) idx) in
 
     let idx t =
       let t = Type.normalize t in
-      match to_idl_prim mode t with
+      match to_idl_prim t with
       | Some i -> Int32.neg i
       | None -> TM.find (normalize t) idx in
 
     let rec add_typ t =
       match t with
       | Non -> assert false
-      | Prim Blob -> 
-        assert (mode = Candid);
+      | Prim Blob ->
         add_typ Type.(Array (Prim Nat8))
       | Prim Region ->
         add_sleb128 idl_alias; add_idx t
       | Prim _ -> assert false
       | Tup ts ->
-        add_sleb128 (match mode with
-        | Candid -> idl_record
-        | Persistence -> idl_tuple);
+        add_sleb128 idl_record;
         add_leb128 (List.length ts);
         List.iteri (fun i t ->
           add_leb128 i;
@@ -6723,29 +6942,22 @@ module Serialization = struct
      List.map idx ts)
 
   (* See Note [Candid subtype checks] *)
-  let reserve_global_type_descriptor (env : E.t) =
-    let candid_data_segment = E.add_data_segment env "" in
-    let type_offsets_segment = E.add_data_segment env "" in
-    let idl_types_segment = E.add_data_segment env "" in
-    E.(env.global_type_descriptor := Some {
-      candid_data_segment;
-      type_offsets_segment;
-      idl_types_segment;
-    })
-
-  let create_global_type_descriptor (env : E.t) (set_candid_data_length, set_type_offsets_length, set_idl_types_length) =
-    let descriptor = get_global_type_descriptor env in
-    let candid_data, type_offsets, idl_types = type_desc env Candid (E.get_typtbl_typs env) in
-    let candid_data_binary = [StaticBytes.Bytes candid_data] in
-    let candid_data_length = E.replace_data_segment env E.(descriptor.candid_data_segment) candid_data_binary in
-    set_candid_data_length candid_data_length;
-    let type_offsets_binary = [StaticBytes.i32s (List.map Int32.of_int type_offsets)] in
-    let type_offsets_length = E.replace_data_segment env E.(descriptor.type_offsets_segment) type_offsets_binary in
-    set_type_offsets_length type_offsets_length;
-    let idl_types_binary = [StaticBytes.i32s idl_types] in
-    let idl_types_length = E.replace_data_segment env E.(descriptor.idl_types_segment) idl_types_binary in
-    set_idl_types_length idl_types_length
-
+  let set_delayed_globals (env : E.t) (set_typtbl, set_typtbl_end, set_typtbl_size, set_typtbl_idltyps) =
+    let typdesc, offsets, idltyps = type_desc env (E.get_typtbl_typs env) in
+    let static_typedesc = E.add_static_unskewed env [StaticBytes.Bytes typdesc] in
+    let static_typtbl =
+      let bytes = StaticBytes.i32s
+        (List.map (fun offset ->
+          Int32.(add static_typedesc (of_int(offset))))
+        offsets)
+      in
+      E.add_static_unskewed env [bytes]
+    in
+    let static_idltyps = E.add_static_unskewed env [StaticBytes.i32s idltyps] in
+    set_typtbl static_typtbl;
+    set_typtbl_end Int32.(add static_typedesc (of_int (String.length typdesc)));
+    set_typtbl_size (Int32.of_int (List.length offsets));
+    set_typtbl_idltyps static_idltyps
 
   (* Returns data (in bytes) and reference buffer size (in entries) needed *)
   let rec buffer_size env t =
@@ -6989,11 +7201,11 @@ module Serialization = struct
       | Prim Float ->
         reserve env get_data_buf 8l ^^
         get_x ^^ Float.unbox env ^^
-        G.i (Store {ty = F64Type; align = 0; offset = 0l; sz = None})
+        G.i (Store {ty = F64Type; align = 0; offset = 0L; sz = None})
       | Prim ((Int64|Nat64) as pty) ->
         reserve env get_data_buf 8l ^^
         get_x ^^ BoxedWord64.unbox env pty ^^
-        G.i (Store {ty = I64Type; align = 0; offset = 0l; sz = None})
+        G.i (Store {ty = I64Type; align = 0; offset = 0L; sz = None})
       | Prim ((Int32|Nat32) as ty) ->
         write_word_32 env get_data_buf (get_x ^^ BoxedSmallWord.unbox env ty)
       | Prim Char ->
@@ -7001,7 +7213,7 @@ module Serialization = struct
       | Prim ((Int16|Nat16) as ty) ->
         reserve env get_data_buf 2l ^^
         get_x ^^ TaggedSmallWord.lsb_adjust ty ^^
-        G.i (Store {ty = I32Type; align = 0; offset = 0l; sz = Some Wasm.Types.Pack16})
+        G.i (Store {ty = I32Type; align = 0; offset = 0L; sz = Some Wasm.Types.Pack16})
       | Prim ((Int8|Nat8) as ty) ->
         write_byte env get_data_buf (get_x ^^ TaggedSmallWord.lsb_adjust ty)
       | Prim Bool ->
@@ -7024,7 +7236,7 @@ module Serialization = struct
         write_alias (fun () ->
           reserve env get_data_buf 8l ^^
           get_x ^^ Region.id env ^^
-          G.i (Store {ty = I64Type; align = 0; offset = 0l; sz = None}) ^^
+          G.i (Store {ty = I64Type; align = 0; offset = 0L; sz = None}) ^^
           write_word_32 env get_data_buf (get_x ^^ Region.page_count env) ^^
           write_blob env get_data_buf (get_x ^^ Region.vec_pages env)
         )
@@ -7079,45 +7291,48 @@ module Serialization = struct
 
   (* This value is returned by deserialize_go if deserialization fails in a way
      that should be recoverable by opt parsing.
-     It is an (invalid) sentinel pointer value (in skewed format) and can be used for pointer comparison.
-     It will be never placed on the heap and must not be dereferenced.
-     If unskewed, it refers to the unallocated last Wasm memory page.
+     By virtue of being a deduped static value, it can be detected by pointer
+     comparison.
   *)
-  let coercion_error_value env = 0xffff_fffdl
+  let coercion_error_value env : int32 =
+    Tagged.shared_static_obj env Tagged.CoercionFailure []
 
   (* See Note [Candid subtype checks] *)
-  let with_rel_buf_opt env extended get_typtbl_size1 get_typtbl_size2 f =
+  let with_rel_buf_opt env extended get_typtbl_size1 f =
     if extended then
       f (compile_unboxed_const 0l)
     else
-      get_typtbl_size1 ^^ get_typtbl_size2 ^^
+      get_typtbl_size1 ^^ get_typtbl_size env ^^
       E.call_import env "rts" "idl_sub_buf_words" ^^
       Stack.dynamic_with_words env "rel_buf" (fun get_ptr ->
-        get_ptr ^^ get_typtbl_size1 ^^ get_typtbl_size2 ^^
+        get_ptr ^^ get_typtbl_size1 ^^ get_typtbl_size env ^^
         E.call_import env "rts" "idl_sub_buf_init" ^^
         f get_ptr)
 
   (* See Note [Candid subtype checks] *)
   let idl_sub env t2 =
     let idx = E.add_typtbl_typ env t2 in
-    get_global_idl_types env ^^
-    Blob.payload_ptr_unskewed env ^^
-    G.i (Load {ty = I32Type; align = 0; offset = Int32.mul idx candid_type_offset_size (*!*); sz = None}) ^^
-    Func.share_code2 Func.Always env ("idl_sub")
-      (("idltyp1", I32Type),
+    get_typtbl_idltyps env ^^
+    G.i (Load {ty = I32Type; align = 0; offset = Int64.of_int32 (Int32.mul idx 4l) (*!*); sz = None}) ^^
+    Func.share_code6 Func.Always env ("idl_sub")
+      (("rel_buf", I32Type),
+       ("typtbl1", I32Type),
+       ("typtbl_end1", I32Type),
+       ("typtbl_size1", I32Type),
+       ("idltyp1", I32Type),
        ("idltyp2", I32Type)
       )
       [I32Type]
-      (fun env get_idltyp1 get_idltyp2 ->
-        Registers.get_rel_buf_opt env ^^
+      (fun env get_rel_buf get_typtbl1 get_typtbl_end1 get_typtbl_size1 get_idltyp1 get_idltyp2 ->
+        get_rel_buf ^^
         E.else_trap_with env "null rel_buf" ^^
-        Registers.get_rel_buf_opt env ^^
-        Registers.get_typtbl env ^^
-        Registers.get_global_typtbl env ^^
-        Registers.get_typtbl_end env ^^
-        Registers.get_global_typtbl_end env ^^
-        Registers.get_typtbl_size env ^^
-        Registers.get_global_typtbl_size env ^^
+        get_rel_buf ^^
+        get_typtbl1 ^^
+        get_typtbl env ^^
+        get_typtbl_end1 ^^
+        get_typtbl_end env ^^
+        get_typtbl_size1 ^^
+        get_typtbl_size env ^^
         get_idltyp1 ^^
         get_idltyp2 ^^
         E.call_import env "rts" "idl_sub")
@@ -7168,7 +7383,7 @@ module Serialization = struct
       let get_data_buf = Registers.get_data_buf env in
       let _get_ref_buf = Registers.get_ref_buf env in
       let get_typtbl = Registers.get_typtbl env in
-      let _get_typtbl_end = Registers.get_typtbl_end env in
+      let get_typtbl_end = Registers.get_typtbl_end env in
       let get_typtbl_size = Registers.get_typtbl_size env in
 
       (* Decrement and check idl quota *)
@@ -7244,7 +7459,7 @@ module Serialization = struct
       (* returns true if we are looking at primitive type with this id *)
       let check_prim_typ t =
         get_idltyp ^^
-        compile_eq_const (Int32.neg (Option.get (to_idl_prim Candid t)))
+        compile_eq_const (Int32.neg (Option.get (to_idl_prim t)))
       in
 
       let with_prim_typ t f =
@@ -7393,7 +7608,7 @@ module Serialization = struct
         begin
           (* sanity check *)
           get_arg_typ ^^
-          compile_eq_const (Int32.neg (Option.get (to_idl_prim Candid (Prim Region)))) ^^
+          compile_eq_const (Int32.neg (Option.get (to_idl_prim (Prim Region)))) ^^
           E.else_trap_with env "IDL error: unexpecting primitive alias type" ^^
           get_arg_typ
         end
@@ -7476,7 +7691,7 @@ module Serialization = struct
           end begin
           (* Decoded before. Check type hash *)
           ReadBuf.read_word32 env get_data_buf ^^ Blob.lit env Tagged.B (typ_hash t) ^^
-          Blob.compare env (Some Operator.EqOp) ^^
+          G.i (Compare (Wasm.Values.I32 I32Op.Eq)) ^^
           E.else_trap_with env ("Stable memory error: Aliased at wrong type, expected: " ^ typ_hash t)
         end ^^
 
@@ -7660,7 +7875,7 @@ module Serialization = struct
           let (set_region, get_region) = new_local env "region" in
           (* sanity check *)
           get_region_typ ^^
-          compile_eq_const (Int32.neg (Option.get (to_idl_prim Candid (Prim Region)))) ^^
+          compile_eq_const (Int32.neg (Option.get (to_idl_prim (Prim Region)))) ^^
           E.else_trap_with env "deserialize_go (Region): unexpected idl_typ" ^^
           (* pre-allocate a region object, with dummy fields *)
           compile_const_64 0L ^^ (* id *)
@@ -7787,6 +8002,10 @@ module Serialization = struct
         get_rel_buf_opt ^^
         G.if1 I32Type
           begin
+            get_rel_buf_opt ^^
+            get_typtbl ^^
+            get_typtbl_end ^^
+            get_typtbl_size ^^
             get_idltyp ^^
             idl_sub env t
           end
@@ -7808,6 +8027,10 @@ module Serialization = struct
         get_rel_buf_opt ^^
         G.if1 I32Type
           begin
+            get_rel_buf_opt ^^
+            get_typtbl ^^
+            get_typtbl_end ^^
+            get_typtbl_size ^^
             get_idltyp ^^
             idl_sub env t
           end
@@ -7842,7 +8065,7 @@ module Serialization = struct
       let (set_data_size, get_data_size) = new_local env "data_size" in
       let (set_refs_size, get_refs_size) = new_local env "refs_size" in
 
-      let (tydesc, _offsets, _idltyps) = type_desc env Candid ts in
+      let (tydesc, _offsets, _idltyps) = type_desc env ts in
       let tydesc_len = Int32.of_int (String.length tydesc) in
 
       (* Get object sizes *)
@@ -7914,11 +8137,6 @@ module Serialization = struct
       Stack.with_words env "get_typtbl_ptr" 1l (fun get_typtbl_ptr ->
       Stack.with_words env "get_maintyps_ptr" 1l (fun get_maintyps_ptr ->
 
-      (* Allocate space for out parameters of idl_alloc_typtbl *)
-      Stack.with_words env "get_global_typtbl_ptr" 1l (fun get_global_typtbl_ptr ->
-      Stack.with_words env "get_global_typtbl_end_ptr" 1l (fun get_global_typtbl_end_ptr ->
-      Stack.with_words env "get_global_typtbl_size_ptr" 1l (fun get_global_typtbl_size_ptr ->
-
       (* Set up read buffers *)
       ReadBuf.alloc env (fun get_data_buf -> ReadBuf.alloc env (fun get_ref_buf ->
 
@@ -7931,21 +8149,8 @@ module Serialization = struct
       Bool.lit extended ^^ get_data_buf ^^ get_typtbl_ptr ^^ get_typtbl_size_ptr ^^ get_maintyps_ptr ^^
       E.call_import env "rts" "parse_idl_header" ^^
 
-      (* Allocate global type type, if necessary for subtype checks *)
-      (if extended then
-         G.nop
-       else begin
-         get_global_candid_data env ^^
-         get_global_type_offsets env ^^
-         get_global_typtbl_ptr ^^ get_global_typtbl_end_ptr ^^ get_global_typtbl_size_ptr ^^
-         E.call_import env "rts" "idl_alloc_typtbl"
-      end) ^^
-
       (* Allocate memo table, if necessary *)
-      with_rel_buf_opt env extended
-        (get_typtbl_size_ptr ^^ load_unskewed_ptr)
-        (get_global_typtbl_size_ptr ^^ load_unskewed_ptr)
-        (fun get_rel_buf_opt ->
+      with_rel_buf_opt env extended (get_typtbl_size_ptr ^^ load_unskewed_ptr) (fun get_rel_buf_opt ->
       begin
         (* set up invariant register arguments *)
         get_rel_buf_opt ^^ Registers.set_rel_buf_opt env ^^
@@ -7954,9 +8159,6 @@ module Serialization = struct
         get_typtbl_ptr ^^ load_unskewed_ptr ^^ Registers.set_typtbl env ^^
         get_maintyps_ptr ^^ load_unskewed_ptr ^^ Registers.set_typtbl_end env ^^
         get_typtbl_size_ptr ^^ load_unskewed_ptr ^^ Registers.set_typtbl_size env ^^
-        get_global_typtbl_ptr ^^ load_unskewed_ptr ^^ Registers.set_global_typtbl env ^^
-        get_global_typtbl_end_ptr ^^ load_unskewed_ptr ^^ Registers.set_global_typtbl_end env ^^
-        get_global_typtbl_size_ptr ^^ load_unskewed_ptr ^^ Registers.set_global_typtbl_size env ^^
         Registers.reset_value_limit env get_blob get_rel_buf_opt
       end ^^
 
@@ -8020,11 +8222,8 @@ module Serialization = struct
         ReadBuf.is_empty env get_data_buf ^^
         E.else_trap_with env ("IDL error: left-over bytes " ^ ts_name) ^^
         ReadBuf.is_empty env get_ref_buf ^^
-        E.else_trap_with env ("IDL error: left-over references " ^ ts_name) ^^
-
-        (* Safety guard: The temporary pointers in the registers must no longer be used when a GC increment runs. *)
-        Registers.clear_registers env
-      )))))))))
+        E.else_trap_with env ("IDL error: left-over references " ^ ts_name)
+      ))))))
 
     ))
 
@@ -8146,18 +8345,12 @@ encountered during code generation, the other is determined
 dynamically by, e.g. message payload. The latter will vary with
 each payload to decode.
 
-The static type table and a type descriptor are stored in passive 
-data segments. Instead of absolute memory addresses, the static type 
-table in the data segment only contains relative offsets into type 
-descriptor. When loaded, these offsets are patched by static addresses 
-that point into the type descriptor.
-
 The known Motoko types are accumulated in a global list as required
 and then, in a final compilation step, encoded to global type table
-and the type descriptor (sequence of type indices). The encoding is 
-stored in passive data segments referenced (by way of segment indices) 
-from dedicated wasm globals so that we can generate code that 
-references the globals before their final definitions are known.
+and sequence of type indices. The encoding is stored as static
+data referenced by dedicated wasm globals so that we can generate
+code that references the globals before their final definitions are
+known.
 
 Deserializing a proper (not extended) Candid value stack allocates a
 mutable word buffer, of size determined by `idl_sub_buf_words`.
@@ -8184,76 +8377,287 @@ from a Boolean to a three-valued argument to efficiently check equality for
 invariant type constructors in a single pass.
 *)
 
-end (* Serialization *)
+end (* MakeSerialization *)
+
+module Serialization = MakeSerialization(BumpStream)
+
+module BlobStream : Stream = struct
+  let create env get_data_size set_token get_token header =
+    let header_size = Int32.of_int (String.length header) in
+    get_data_size ^^ compile_add_const header_size ^^
+    E.call_import env "rts" "alloc_stream" ^^ set_token ^^ (* allocation barrier called in alloc_stream *)
+    get_token ^^
+    Blob.lit env Tagged.B header ^^
+    E.call_import env "rts" "stream_write_text"
+
+  let check_filled env get_token get_data_size =
+    G.i Drop
+
+  let terminate env get_token _get_data_size _header_size =
+    get_token ^^ E.call_import env "rts" "stream_split" ^^
+    let set_blob, get_blob = new_local env "blob" in
+    set_blob ^^
+    get_blob ^^ Blob.payload_ptr_unskewed env ^^
+    get_blob ^^ Blob.len env
+
+  let finalize_buffer code = code
+
+  let name_for fn_name ts = "@Bl_" ^ fn_name ^ "<" ^ Typ_hash.typ_seq_hash ts ^ ">"
+
+  let absolute_offset env get_token =
+    let offset = 8l in (* see invariant in `stream.rs` *)
+    let filled_field = Int32.add (Blob.len_field env) offset in
+    get_token ^^ Tagged.load_field_unskewed env filled_field
+
+  let checkpoint _env _get_token = G.i Drop
+
+  let reserve env get_token bytes =
+    get_token ^^ compile_unboxed_const bytes ^^ E.call_import env "rts" "stream_reserve"
+
+  let write_word_leb env get_token code =
+    let set_word, get_word = new_local env "word" in
+    code ^^ set_word ^^
+    I32Leb.compile_store_to_data_buf_unsigned env get_word
+      (get_token ^^ I32Leb.compile_leb128_size get_word ^^ E.call_import env "rts" "stream_reserve") ^^
+    G.i Drop
+
+  let write_word_32 env get_token code =
+    reserve env get_token Heap.word_size ^^
+    code ^^
+    G.i (Store {ty = I32Type; align = 0; offset = 0L; sz = None})
+
+  let write_byte env get_token code =
+    get_token ^^ code ^^
+    E.call_import env "rts" "stream_write_byte"
+
+  let write_blob env get_token get_x =
+    let set_len, get_len = new_local env "len" in
+    get_x ^^ Blob.len env ^^ set_len ^^
+    write_word_leb env get_token get_len ^^
+    get_token ^^
+    get_x ^^ Blob.payload_ptr_unskewed env ^^
+    get_len ^^
+    E.call_import env "rts" "stream_write"
+
+  let write_text env get_token get_x =
+    write_word_leb env get_token (get_x ^^ Text.size env) ^^
+    get_token ^^ get_x ^^
+    E.call_import env "rts" "stream_write_text"
+
+  let write_bignum_leb env get_token get_x =
+    get_token ^^ get_x ^^
+    BigNum.compile_store_to_stream_unsigned env
+
+  let write_bignum_sleb env get_token get_x =
+    get_token ^^ get_x ^^
+    BigNum.compile_store_to_stream_signed env
+
+end
+
 
-(* OldStabilization as migration code: 
-  Deserializing a last time from explicit stable memory into the stable heap:
+(* Stabilization (serialization to/from stable memory) of both:
    * stable variables; and
    * virtual stable memory.
    c.f.
    * ../../design/Stable.md
    * ../../design/StableMemory.md
 *)
-module OldStabilization = struct
+
+module Stabilization = struct
+
   let extend64 code = code ^^ G.i (Convert (Wasm.Values.I64 I64Op.ExtendUI32))
-  
-  let _read_word32 env =
-    StableMem.read env false "word32" I32Type 4l load_unskewed_ptr
-  let write_word32 env =
-    StableMem.write env false "word32" I32Type 4l store_unskewed_ptr
 
-  (* read and clear word32 from stable mem offset on stack *)
-  let read_and_clear_word32 env =
-    match E.mode env with
-    | Flags.ICMode | Flags.RefMode ->
-      Func.share_code1 Func.Always env "__stablemem_read_and_clear_word32"
-        ("offset", I64Type) [I32Type]
-        (fun env get_offset ->
-          Stack.with_words env "temp_ptr" 1l (fun get_temp_ptr ->
-            let (set_word, get_word) = new_local env "word" in
-            (* read word *)
-            get_temp_ptr ^^ G.i (Convert (Wasm.Values.I64 I64Op.ExtendUI32)) ^^
-            get_offset ^^
-            compile_const_64 4L ^^
-            StableMem.stable64_read env ^^
-            get_temp_ptr ^^ load_unskewed_ptr ^^
-            set_word ^^
-            (* write 0 *)
-            get_temp_ptr ^^ compile_unboxed_const 0l ^^ store_unskewed_ptr ^^
-            get_offset ^^
-            get_temp_ptr ^^ G.i (Convert (Wasm.Values.I64 I64Op.ExtendUI32)) ^^
-            compile_const_64 4L ^^
-            StableMem.stable64_write env ^^
-            (* return word *)
-            get_word
-        ))
-    | _ -> assert false
+  (* The below stream implementation is geared towards the
+     tail section of stable memory, where the serialised
+     stable variables go. As such a few intimate details of
+     the stable memory layout are burnt in, such as the
+     variable `N` from the design document. *)
+  module StableMemoryStream : Stream = struct
+    include BlobStream
+
+    let name_for fn_name ts = "@Sm_" ^ fn_name ^ "<" ^ Typ_hash.typ_seq_hash ts ^ ">"
+
+    let create env get_data_size set_token get_token header =
+      create env (compile_unboxed_const 0x8000l) set_token get_token header ^^
+        (* TODO: push header directly? *)
 
-  (* TODO: rewrite using MemoryFill *)
-  let blob_clear env =
-    Func.share_code1 Func.Always env "blob_clear" ("x", I32Type) [] (fun env get_x ->
-      let (set_ptr, get_ptr) = new_local env "ptr" in
       let (set_len, get_len) = new_local env "len" in
-      get_x ^^
-      Blob.as_ptr_len env ^^
+      get_data_size ^^
+      compile_add_const (Int32.of_int (String.length header)) ^^
       set_len ^^
-      set_ptr ^^
 
-      (* round to word size *)
-      get_len ^^
-      compile_add_const (Int32.sub Heap.word_size 1l) ^^
-      compile_divU_const Heap.word_size ^^
+      let (set_dst, get_dst) = new_local64 env "dst" in
+      StableMem.get_mem_size env ^^
+      compile_shl64_const (Int64.of_int page_size_bits) ^^
+      compile_add64_const 4L ^^ (* `N` is now on the stack *)
+      set_dst ^^
+
+      get_dst ^^
+      extend64 get_len ^^
+      StableMem.ensure env ^^
+
+      get_token ^^
+      get_dst ^^
+      get_dst ^^ extend64 get_len ^^
+      G.i (Binary (Wasm.Values.I64 I64Op.Add)) ^^
+      E.call_import env "rts" "stream_stable_dest"
+
+    let ptr64_field env =
+      let offset = 1l in (* see invariant in `stream.rs` *)
+      Int32.add (Blob.len_field env) offset (* see invariant in `stream.rs`, padding for 64-bit after Stream header *)
+
+    let terminate env get_token get_data_size header_size =
+      get_token ^^
+      E.call_import env "rts" "stream_shutdown" ^^
+      compile_unboxed_zero ^^ (* no need to write *)
+      get_token ^^
+      Tagged.load_field64_unskewed env (ptr64_field env) ^^
+      StableMem.get_mem_size env ^^
+      compile_shl64_const (Int64.of_int page_size_bits) ^^
+      G.i (Binary (Wasm.Values.I64 I64Op.Sub)) ^^
+      compile_sub64_const 4L ^^  (* `N` is now subtracted *)
+      G.i (Convert (Wasm.Values.I32 I32Op.WrapI64))
 
-      (* clear all words *)
-      from_0_to_n env (fun get_i ->
-        get_ptr ^^
-        compile_unboxed_const 0l ^^
-        store_unskewed_ptr ^^
-        get_ptr ^^
-        compile_add_const Heap.word_size ^^
-        set_ptr))
+    let finalize_buffer _ = G.nop (* everything is outputted already *)
+
+    (* Returns a 32-bit unsigned int that is the number of bytes that would
+       have been written to stable memory if flushed. The difference
+       of two such numbers will always be an exact byte distance. *)
+    let absolute_offset env get_token =
+      let start64_field = Int32.add (ptr64_field env) 2l in (* see invariant in `stream.rs` *)
+      absolute_offset env get_token ^^
+      get_token ^^
+      Tagged.load_field64_unskewed env (ptr64_field env) ^^
+      get_token ^^
+      Tagged.load_field64_unskewed env start64_field ^^
+      G.i (Binary (Wasm.Values.I64 I64Op.Sub)) ^^
+      G.i (Convert (Wasm.Values.I32 I32Op.WrapI64)) ^^
+      G.i (Binary (Wasm.Values.I32 I32Op.Add))
+  end
+
+  module Externalization = MakeSerialization(StableMemoryStream)
+
+  let stabilize env t =
+    let (set_dst, get_dst) = new_local env "dst" in
+    let (set_len, get_len) = new_local env "len" in
+
+    (if !Flags.gc_strategy = Flags.Incremental then
+      E.call_import env "rts" "stop_gc_on_upgrade"
+    else
+      G.nop) ^^
+
+
+    Externalization.serialize env [t] ^^
+    set_len ^^
+    set_dst ^^
+
+    StableMem.get_mem_size env ^^
+    G.i (Test (Wasm.Values.I64 I64Op.Eqz)) ^^
+    G.if0
+      begin
+        (* assert StableMem.get_version() == StableMem.version_no_stable_memory *)
+        StableMem.get_version env ^^
+        compile_eq_const StableMem.version_no_stable_memory ^^
+        E.else_trap_with env "StableMem.get_version() != version_no_stable_memory" ^^
+
+        (* Case-true: Stable variables only --
+           no use of either regions or experimental API. *)
+        (* ensure [0,..,3,...len+4) *)
+        compile_const_64 0L ^^
+        extend64 get_len ^^
+        compile_add64_const 4L ^^  (* reserve one word for size *)
+        StableMem.ensure env ^^
+
+        (* write len to initial word of stable memory*)
+        compile_const_64 0L ^^
+        get_len ^^
+        StableMem.write_word32 env ^^
+
+        (* copy data to following stable memory *)
+        Externalization.Strm.finalize_buffer
+          begin
+            compile_const_64 4L ^^
+            extend64 get_dst ^^
+            extend64 get_len ^^
+            StableMem.stable64_write env
+          end
+      end
+      begin
+        (* Case-false: Either regions or experimental API. *)
+        let (set_N, get_N) = new_local64 env "N" in
+
+        (* let N = !size * page_size *)
+        StableMem.get_mem_size env ^^
+        compile_shl64_const (Int64.of_int page_size_bits) ^^
+        set_N ^^
+
+        (* grow mem to page including address
+           N + 4 + len + 4 + 4 + 4 = N + len + 16
+        *)
+        get_N ^^
+        extend64 get_len ^^
+        compile_add64_const 16L ^^
+        StableMem.ensure env ^^
+
+        get_N ^^
+        get_len ^^
+        StableMem.write_word32 env ^^
+
+        (* copy data to following stable memory *)
+        Externalization.Strm.finalize_buffer
+          begin
+            get_N ^^
+            compile_add64_const 4L ^^
+            extend64 get_dst ^^
+            extend64 get_len ^^
+            StableMem.stable64_write env
+          end ^^
 
-  let old_destabilize env ty save_version =
+        (* let M = pagesize * ic0.stable64_size() - 1 *)
+        (* M is beginning of last page *)
+        let (set_M, get_M) = new_local64 env "M" in
+        StableMem.stable64_size env ^^
+        compile_sub64_const 1L ^^
+        compile_shl64_const (Int64.of_int page_size_bits) ^^
+        set_M ^^
+
+        (* store mem_size at M + (pagesize - 12) *)
+        get_M ^^
+        compile_add64_const (Int64.sub page_size64 12L) ^^
+        StableMem.get_mem_size env ^^
+        G.i (Convert (Wasm.Values.I32 I32Op.WrapI64)) ^^
+        (* TODO: write word64 *)
+        StableMem.write_word32 env ^^
+
+        (* save first word at M + (pagesize - 8);
+           mark first word as 0 *)
+        get_M ^^
+        compile_add64_const (Int64.sub page_size64 8L) ^^
+        compile_const_64 0L ^^
+        StableMem.read_and_clear_word32 env ^^
+        StableMem.write_word32 env ^^
+
+        (* save version at M + (pagesize - 4) *)
+        get_M ^^
+        compile_add64_const (Int64.sub page_size64 4L) ^^
+
+        (* assert StableMem.get_version() > StableMem.version_no_stable_memory *)
+        StableMem.get_version env ^^
+        compile_rel_const I32Op.GtU StableMem.version_no_stable_memory ^^
+        E.else_trap_with env "StableMem.get_version() == version_no_stable_memory" ^^
+
+        (* assert StableMem.get_version() <= StableMem.version_max *)
+        StableMem.get_version env ^^
+        compile_rel_const I32Op.LeU StableMem.version_max ^^
+        E.else_trap_with env "StableMem.get_version() > version_max" ^^
+
+        (* record the version *)
+        StableMem.get_version env ^^
+        StableMem.write_word32 env
+
+      end
+
+  let destabilize env ty save_version =
     match E.mode env with
     | Flags.ICMode | Flags.RefMode ->
       let (set_pages, get_pages) = new_local64 env "pages" in
@@ -8274,8 +8678,8 @@ module OldStabilization = struct
           StableMem.get_mem_size env ^^
           G.i (Test (Wasm.Values.I64 I64Op.Eqz)) ^^
           E.else_trap_with env "StableMem.mem_size non-zero" ^^
-          compile_unboxed_const StableMem.version_stable_heap_no_regions ^^
-          save_version ^^
+          compile_unboxed_const 0l ^^
+          StableMem.set_version env ^^
           Object.lit_raw env fs'
         end
         begin
@@ -8284,7 +8688,7 @@ module OldStabilization = struct
           let (set_len, get_len) = new_local env "len" in
           let (set_offset, get_offset) = new_local64 env "offset" in
           compile_const_64 0L ^^
-          read_and_clear_word32 env ^^
+          StableMem.read_and_clear_word32 env ^^
           set_marker ^^
 
           get_marker ^^
@@ -8305,7 +8709,7 @@ module OldStabilization = struct
               (* read version *)
               get_M ^^
               compile_add64_const (Int64.sub page_size64 4L) ^^
-              read_and_clear_word32 env ^^
+              StableMem.read_and_clear_word32 env ^^
               set_version ^^
               get_version ^^
               save_version ^^
@@ -8322,13 +8726,13 @@ module OldStabilization = struct
               compile_const_64 0L ^^
               get_M ^^
               compile_add64_const (Int64.sub page_size64 8L) ^^
-              read_and_clear_word32 env ^^
-              write_word32 env ^^
+              StableMem.read_and_clear_word32 env ^^
+              StableMem.write_word32 env ^^
 
               (* restore mem_size *)
               get_M ^^
               compile_add64_const (Int64.sub page_size64 12L) ^^
-              extend64 (read_and_clear_word32 env) ^^ (*TODO: use 64 bits *)
+              extend64 (StableMem.read_and_clear_word32 env) ^^ (*TODO: use 64 bits *)
               StableMem.set_mem_size env ^^
 
               StableMem.get_mem_size env ^^
@@ -8337,7 +8741,7 @@ module OldStabilization = struct
 
               (* set len *)
               get_N ^^
-              read_and_clear_word32 env ^^
+              StableMem.read_and_clear_word32 env ^^
               set_len ^^
 
               (* set offset *)
@@ -8382,7 +8786,7 @@ module OldStabilization = struct
 
           (* clear blob contents *)
           get_blob ^^
-          blob_clear env ^^
+          Blob.clear env ^^
 
           (* copy zeros from blob to stable memory *)
           get_offset ^^
@@ -8396,215 +8800,21 @@ module OldStabilization = struct
     | _ -> assert false
 end
 
-(* New stable memory layout with new version.
-   Prevents forward compatibility of old compiled programs that rely on deserialization.
-  If size == 0: empty
-  let end = physical size * page_size
-  If logical size N > 0:
-    [0..4)          0 (first word is backed up at `end-8`)
-    [4..N)          
-            
-    [end-16..end-8) 
-    [end-8..end-4)  
-    [end-4..end)   
-  ending at page boundary
-  Note: The first word must be empty to distinguish this version from the Candid legacy version 0 (which has first word != 0).
-*)
-module NewStableMemory = struct
-  let physical_size env =
-    IC.system_call env "stable64_size" ^^
-    compile_shl64_const (Int64.of_int page_size_bits)
-
-  let store_at_end env offset typ get_value =
-    physical_size env ^^
-    compile_sub64_const offset ^^
-    get_value ^^
-    match typ with
-    | I32Type -> StableMem.write_word32 env
-    | I64Type -> StableMem.write_word64 env
-    | _ -> assert false
-
-  let read_from_end env offset typ =
-    physical_size env ^^
-    compile_sub64_const offset ^^
-    match typ with
-    | I32Type -> StableMem.read_word32 env
-    | I64Type -> StableMem.read_word64 env
-    | _ -> assert false
-
-  let clear_at_end env offset typ =
-    store_at_end env offset typ 
-    (match typ with
-    | I32Type -> compile_unboxed_const 0l
-    | I64Type -> compile_const_64 0L
-    | _ -> assert false
-    )
-
-  let logical_size_offset = 16L
-  let first_word_backup_offset = 8L
-  let version_offset = 4L
-
-  let upgrade_version env =
-    StableMem.set_version env ^^
-    StableMem.get_version env ^^
-    compile_eq_const StableMem.legacy_version_no_stable_memory ^^
-    StableMem.get_version env ^^
-    compile_eq_const StableMem.legacy_version_some_stable_memory ^^
-    G.i (Binary (Wasm.Values.I32 I32Op.Or)) ^^
-    (G.if0
-      (compile_unboxed_const StableMem.version_stable_heap_no_regions ^^
-      StableMem.set_version env)
-      G.nop) ^^
-    StableMem.get_version env ^^
-    compile_eq_const StableMem.legacy_version_regions ^^
-    (G.if0
-      (compile_unboxed_const StableMem.version_stable_heap_regions ^^
-      StableMem.set_version env)
-      G.nop)
-      
-  let grow_size env amount =
-    StableMem.get_mem_size env ^^
-    compile_shl64_const (Int64.of_int page_size_bits) ^^
-    compile_const_64 amount ^^
-    StableMem.ensure env
-
-  let backup env =
-    let (set_first_word, get_first_word) = new_local env "first_word" in
-    physical_size env ^^
-    G.i (Test (Wasm.Values.I64 I64Op.Eqz)) ^^
-    G.if0
-      G.nop
-      begin
-        (* read and clear first word *)
-        compile_const_64 0L ^^ StableMem.read_word32 env ^^ set_first_word ^^
-        compile_const_64 0L ^^ compile_unboxed_const 0l ^^ StableMem.write_word32 env ^^
-
-        grow_size env logical_size_offset ^^
-
-        (* backup logical size *)
-        store_at_end env logical_size_offset I64Type (StableMem.get_mem_size env) ^^
-
-        (* backup first word *)
-        store_at_end env first_word_backup_offset I32Type get_first_word ^^
-
-        (* store the version *)
-        store_at_end env version_offset I32Type (StableMem.get_version env)
-      end
+module GCRoots = struct
+  let register env static_roots =
 
-  let restore env =
-    let (set_first_word, get_first_word) = new_local env "first_word" in
-    physical_size env ^^
-    G.i (Test (Wasm.Values.I64 I64Op.Eqz)) ^^
-    G.if0
-      begin
-        compile_const_64 0L ^^ StableMem.set_mem_size env
-      end
-      begin
-        (* check the version *)
-        read_from_end env version_offset I32Type ^^
-        StableMem.set_version env ^^
-        StableMem.get_version env ^^
-        compile_eq_const StableMem.version_stable_heap_no_regions ^^
-        StableMem.get_version env ^^
-        compile_eq_const StableMem.version_stable_heap_regions ^^
-        G.i (Binary (Wasm.Values.I32 I32Op.Or)) ^^
-        E.else_trap_with env (Printf.sprintf
-          "unsupported stable memory version (expected %s or %s)"
-           (Int32.to_string StableMem.version_stable_heap_no_regions)
-           (Int32.to_string StableMem.version_stable_heap_regions)) ^^
-
-        (* read first word *)
-        read_from_end env first_word_backup_offset I32Type ^^
-        set_first_word ^^
-        
-        (* restore logical size *)
-        read_from_end env logical_size_offset I64Type ^^
-        StableMem.set_mem_size env ^^
-
-        (* clear size and version *)
-        clear_at_end env logical_size_offset I64Type ^^
-        clear_at_end env first_word_backup_offset I32Type ^^
-        clear_at_end env version_offset I32Type ^^
-
-        (* restore first word *)
-        compile_const_64 0L ^^ get_first_word ^^ StableMem.write_word32 env
-      end
-end
+    let get_static_roots = E.add_fun env "get_static_roots" (Func.of_body env [] [I32Type] (fun env ->
+      compile_unboxed_const static_roots
+    )) in
 
-module Persistence = struct
-  let load_stable_actor env = E.call_import env "rts" "load_stable_actor"
-    
-  let save_stable_actor env = E.call_import env "rts" "save_stable_actor"
-
-  let free_stable_actor env = E.call_import env "rts" "free_stable_actor"
-
-  let register_stable_type env actor_type =
-    let (candid_type_desc, type_offsets, type_indices) = Serialization.(type_desc env Persistence [actor_type]) in
-    let serialized_offsets = StaticBytes.(as_bytes [i32s (List.map Int32.of_int type_offsets)]) in
-    assert (type_indices = [0l]);
-    Blob.lit env Tagged.B candid_type_desc ^^
-    Blob.lit env Tagged.B serialized_offsets ^^
-    E.call_import env "rts" "register_stable_type"
-
-  let create_actor env actor_type get_field_value =
-    let (_, field_declarations) = Type.as_obj actor_type in
-    let field_initializers = List.map
-      (fun field -> (field.Type.lab, fun () -> (get_field_value field)))
-      field_declarations
-    in
-    Object.lit_raw env field_initializers
+    E.add_export env (nr {
+      name = Lib.Utf8.decode "get_static_roots";
+      edesc = nr (FuncExport (nr get_static_roots))
+    })
 
-  let recover_actor env actor_type =
-    let load_old_field env field =
-      if field.Type.typ = Type.(Opt Any) then
-        (* A stable variable may have been promoted to type `Any`: Therefore, drop its former content. *)
-        Opt.inject env (Tuple.compile_unit env)
-      else
-        (load_stable_actor env ^^ Object.load_idx_raw env field.Type.lab) in
-    let recover_field field = 
-      load_stable_actor env ^^
-      Object.contains_field env field.Type.lab ^^
-      (G.if1 I32Type
-        (load_old_field env field)
-        (Opt.null_lit env)
-      ) in
-    create_actor env actor_type recover_field ^^
-    free_stable_actor env
-
-  let save env actor_type =
-    save_stable_actor env ^^
-    NewStableMemory.backup env ^^
-    UpgradeStatistics.set_instructions env
-
-  let load env actor_type =
-    register_stable_type env actor_type ^^
-    load_stable_actor env ^^
-    G.i (Test (Wasm.Values.I32 I32Op.Eqz)) ^^
-    (G.if1 I32Type
-      begin
-        OldStabilization.old_destabilize env actor_type (NewStableMemory.upgrade_version env)
-      end
-      begin
-        recover_actor env actor_type ^^
-        NewStableMemory.restore env
-      end) ^^
-    StableMem.region_init env ^^
-    UpgradeStatistics.add_instructions env
-end
+  let store_static_roots env =
+    Arr.vanilla_lit env Tagged.M (E.get_static_roots env)
 
-module GCRoots = struct
-  let register_static_variables env =
-    E.(env.object_pool.frozen) := true;
-    Func.share_code0 Func.Always env "initialize_root_array" [] (fun env ->
-      let length = Int32.of_int (E.object_pool_size env) in
-      compile_unboxed_const length ^^
-      E.call_import env "rts" "initialize_static_variables" ^^
-      E.iterate_object_pool env (fun index allocation ->
-        compile_unboxed_const (Int32.of_int index) ^^
-        allocation env ^^
-        E.call_import env "rts" "set_static_variable"
-      )
-    )
 end (* GCRoots *)
 
 module StackRep = struct
@@ -8685,38 +8895,49 @@ module StackRep = struct
     | UnboxedTuple n -> G.table n (fun _ -> G.i Drop)
     | Const _ | Unreachable -> G.nop
 
-  let rec build_constant env = function
-  | Const.Lit (Const.Vanilla value) -> E.Vanilla value
-  | Const.Lit (Const.Bool number) -> E.Vanilla (Bool.vanilla_lit number)
-  | Const.Lit (Const.Text payload) -> Blob.constant env Tagged.T payload
-  | Const.Lit (Const.Blob payload) -> Blob.constant env Tagged.B payload
-  | Const.Lit (Const.Null) -> E.Vanilla Opt.null_vanilla_lit
-  | Const.Lit (Const.BigInt number) -> BigNum.constant env number
-  | Const.Lit (Const.Word32 (pty, number)) -> BoxedSmallWord.constant env pty number
-  | Const.Lit (Const.Word64 (pty, number)) -> BoxedWord64.constant env pty number
-  | Const.Lit (Const.Float64 number) -> Float.constant env number
-  | Const.Opt value -> Opt.constant env (build_constant env value)
-  | Const.Fun (_, get_fi, _) -> Closure.constant env get_fi
-  | Const.Message _ -> assert false
-  | Const.Unit -> E.Vanilla (Tuple.unit_vanilla_lit env)
-  | Const.Tag (tag, value) ->
-      let payload = build_constant env value in
-      Tagged.shared_object env (fun env ->
-        let materialized_payload = Tagged.materialize_shared_value env payload in
-        Variant.inject env tag materialized_payload
-      )
-  | Const.Array elements -> 
-      let constant_elements = List.map (build_constant env) elements in
-      Arr.constant env Tagged.I constant_elements
-  | Const.Tuple elements -> 
-      let constant_elements = List.map (build_constant env) elements in
-      Arr.constant env Tagged.T constant_elements
-  | Const.Obj fields ->
-      let constant_fields = List.map (fun (name, value) -> (name, build_constant env value)) fields in
-      Object.constant env constant_fields
-
-  let materialize_constant env value =
-    Tagged.materialize_shared_value env (build_constant env value)
+  (* Materializes a Const.lit: If necessary, puts
+     bytes into static memory, and returns a vanilla value.
+  *)
+  let materialize_lit env (lit : Const.lit) : int32 =
+    match lit with
+    | Const.Vanilla n  -> n
+    | Const.Bool n     -> Bool.vanilla_lit n
+    | Const.BigInt n   -> BigNum.vanilla_lit env n
+    | Const.Word32 (pty, n) -> BoxedSmallWord.vanilla_lit env pty n
+    | Const.Word64 (pty, n) -> BoxedWord64.vanilla_lit env pty n
+    | Const.Float64 f  -> Float.vanilla_lit env f
+    | Const.Text t -> Blob.vanilla_lit env Tagged.T t
+    | Const.Blob t -> Blob.vanilla_lit env Tagged.B t
+    | Const.Null       -> Opt.null_vanilla_lit env
+
+  let rec materialize_const_t env (p, cv) : int32 =
+    Lib.Promise.lazy_value p (fun () -> materialize_const_v env cv)
+
+  and materialize_const_v env = function
+    | Const.Fun (get_fi, _) -> Closure.static_closure env (get_fi ())
+    | Const.Message fi -> assert false
+    | Const.Obj fs ->
+      let fs' = List.map (fun (n, c) -> (n, materialize_const_t env c)) fs in
+      Object.vanilla_lit env fs'
+    | Const.Unit -> Tuple.unit_vanilla_lit env
+    | Const.Array cs ->
+      let ptrs = List.map (materialize_const_t env) cs in
+      Arr.vanilla_lit env Tagged.I ptrs
+    | Const.Tuple cs ->
+      let ptrs = List.map (materialize_const_t env) cs in
+      Arr.vanilla_lit env Tagged.T ptrs
+    | Const.Tag (i, c) ->
+      let ptr = materialize_const_t env c in
+      Variant.vanilla_lit env i ptr
+    | Const.Lit l -> materialize_lit env l
+    | Const.Opt c ->
+      let rec kernel = Const.(function
+        | (_, Lit Null) -> None
+        | (_, Opt c) -> kernel c
+        | (_, other) -> Some (materialize_const_v env other)) in
+      match kernel c with
+      | Some ptr -> ptr
+      | None -> Opt.vanilla_lit env (materialize_const_t env c)
 
   let adjust env (sr_in : t) sr_out =
     if eq sr_in sr_out
@@ -8753,20 +8974,20 @@ module StackRep = struct
     | UnboxedFloat64, Vanilla -> Float.box env
     | Vanilla, UnboxedFloat64 -> Float.unbox env
 
-    | Const value, Vanilla -> 
-        materialize_constant env value
-    | Const Const.Lit (Const.Vanilla n), UnboxedWord32 ty ->
-        compile_unboxed_const n ^^
-        TaggedSmallWord.untag env ty
-    | Const Const.Lit (Const.Word32 (ty1, n)), UnboxedWord32 ty2 when ty1 = ty2 -> 
-        compile_unboxed_const n
-    | Const Const.Lit (Const.Word64 (ty1, n)), UnboxedWord64 ty2 when ty1 = ty2 -> 
-        compile_const_64 n
-    | Const Const.Lit (Const.Float64 f), UnboxedFloat64 -> Float.compile_unboxed_const f
+    | Const (_, Const.Lit (Const.Bool b)), Vanilla -> Bool.lit b
+    | Const c, Vanilla -> compile_unboxed_const (materialize_const_t env c)
+    | Const (_, Const.Lit (Const.Vanilla n)), UnboxedWord32 ty ->
+       compile_unboxed_const n ^^
+       TaggedSmallWord.untag env ty
+    | Const (_, Const.Lit (Const.Word32 (ty1, n))), UnboxedWord32 ty2 when ty1 = ty2 ->
+       compile_unboxed_const n
+    | Const (_, Const.Lit (Const.Word64 (ty1, n))), UnboxedWord64 ty2 when ty1 = ty2 ->
+       compile_const_64 n
+    | Const (_, Const.Lit (Const.Float64 f)), UnboxedFloat64 -> Float.compile_unboxed_const f
     | Const c, UnboxedTuple 0 -> G.nop
-    | Const Const.Tuple cs, UnboxedTuple n ->
+    | Const (_, Const.Tuple cs), UnboxedTuple n ->
       assert (n = List.length cs);
-      G.concat_map (fun c -> materialize_constant env c) cs
+      G.concat_map (fun c -> compile_unboxed_const (materialize_const_t env c)) cs
     | _, _ ->
       Printf.eprintf "Unknown stack_rep conversion %s -> %s\n"
         (to_string sr_in) (to_string sr_out);
@@ -8785,18 +9006,18 @@ module VarEnv = struct
     (* A Wasm Local of the current function, that points to memory location,
        which is a MutBox.  Used for mutable captured data *)
     | HeapInd of int32
-    (* A static variable accessed by an index via the runtime system, refers to a MutBox,
-       belonging to the GC root set *)
-    | Static of int32
-    (* Constant literals can reside in dynamic heap *)
-    | Const of Const.v
+    (* A static mutable memory location (static address of a MutBox object) *)
+    (* TODO: Do we need static immutable? *)
+    | HeapStatic of int32
+    (* Not materialized (yet), statically known constant, static location on demand *)
+    | Const of Const.t
     (* public method *)
     | PublicMethod of int32 * string
 
   let is_non_local : varloc -> bool = function
     | Local _
     | HeapInd _ -> false
-    | Static _
+    | HeapStatic _
     | PublicMethod _
     | Const _ -> true
 
@@ -8856,8 +9077,8 @@ module VarEnv = struct
       E.add_local_name env i name;
       ({ ae with vars = NameEnv.add name ((HeapInd i), typ) ae.vars }, i)
 
-  let add_static_variable (ae : t) name index typ =
-      { ae with vars = NameEnv.add name ((Static index), typ) ae.vars }
+  let add_local_heap_static (ae : t) name ptr typ =
+      { ae with vars = NameEnv.add name ((HeapStatic ptr), typ) ae.vars }
 
   let add_local_public_method (ae : t) name (fi, exported_name) typ =
       { ae with vars = NameEnv.add name ((PublicMethod (fi, exported_name) : varloc), typ) ae.vars }
@@ -8882,9 +9103,9 @@ module VarEnv = struct
         E.add_local_name env i name;
         let ae' = { ae with vars = NameEnv.add name ((Local (SR.Vanilla, i)), typ) ae.vars } in
         add_arguments env ae' as_local remainder
-      else
-        let index = MutBox.add_global_mutbox env in
-        let ae' = add_static_variable ae name index typ in
+      else (* needs to go to static memory *)
+        let ptr = MutBox.static env in
+        let ae' = add_local_heap_static ae name ptr typ in
         add_arguments env ae' as_local remainder
 
   let add_argument_locals env (ae : t) =
@@ -8926,36 +9147,54 @@ module Var = struct
 
   (* Returns desired stack representation, preparation code and code to consume
      the value onto the stack *)
-  let set_val env ae var : G.t * SR.t * G.t = match VarEnv.lookup ae var with
-    | Some ((Local (sr, i)), _) ->
+  let set_val env ae var : G.t * SR.t * G.t = match (VarEnv.lookup ae var, !Flags.gc_strategy) with
+    | (Some ((Local (sr, i)), _), _) ->
       G.nop,
       sr,
       G.i (LocalSet (nr i))
-    | Some ((HeapInd i), typ) when potential_pointer typ ->
+    | (Some ((HeapInd i), typ), Flags.Generational) when potential_pointer typ ->
+      G.i (LocalGet (nr i)),
+      SR.Vanilla,
+      MutBox.store_field env ^^
+      G.i (LocalGet (nr i)) ^^
+      Tagged.load_forwarding_pointer env ^^ (* not needed for this GC, but only for forward pointer sanity checks *)
+      compile_add_const ptr_unskew ^^
+      compile_add_const (Int32.mul (MutBox.field env) Heap.word_size) ^^
+      E.call_import env "rts" "post_write_barrier"
+    | (Some ((HeapInd i), typ), Flags.Incremental) when potential_pointer typ ->
       G.i (LocalGet (nr i)) ^^
       Tagged.load_forwarding_pointer env ^^
       compile_add_const ptr_unskew ^^
-      compile_add_const (Int32.mul MutBox.field Heap.word_size),
+      compile_add_const (Int32.mul (MutBox.field env) Heap.word_size),
       SR.Vanilla,
       Tagged.write_with_barrier env
-    | Some ((HeapInd i), typ) ->
+    | (Some ((HeapInd i), typ), _) ->
       G.i (LocalGet (nr i)),
       SR.Vanilla,
       MutBox.store_field env
-    | Some ((Static index), typ) when potential_pointer typ ->
-      Heap.get_static_variable env index ^^
+    | (Some ((HeapStatic ptr), typ), Flags.Generational) when potential_pointer typ ->
+      compile_unboxed_const ptr,
+      SR.Vanilla,
+      MutBox.store_field env ^^
+      compile_unboxed_const ptr ^^
+      Tagged.load_forwarding_pointer env ^^ (* not needed for this GC, but only for forward pointer sanity checks *)
+      compile_add_const ptr_unskew ^^
+      compile_add_const (Int32.mul (MutBox.field env) Heap.word_size) ^^
+      E.call_import env "rts" "post_write_barrier"
+    | (Some ((HeapStatic ptr), typ), Flags.Incremental) when potential_pointer typ ->
+      compile_unboxed_const ptr ^^
       Tagged.load_forwarding_pointer env ^^
       compile_add_const ptr_unskew ^^
-      compile_add_const (Int32.mul MutBox.field Heap.word_size),
+      compile_add_const (Int32.mul (MutBox.field env) Heap.word_size),
       SR.Vanilla,
       Tagged.write_with_barrier env
-    | Some ((Static index), typ) ->
-      Heap.get_static_variable env index,
+    | (Some ((HeapStatic ptr), typ), _) ->
+      compile_unboxed_const ptr,
       SR.Vanilla,
       MutBox.store_field env
-    | Some ((Const _), _) -> fatal "set_val: %s is const" var
-    | Some ((PublicMethod _), _) -> fatal "set_val: %s is PublicMethod" var
-    | None -> fatal "set_val: %s missing" var
+    | (Some ((Const _), _), _) -> fatal "set_val: %s is const" var
+    | (Some ((PublicMethod _), _), _) -> fatal "set_val: %s is PublicMethod" var
+    | (None, _)   -> fatal "set_val: %s missing" var
 
   (* Stores the payload. Returns stack preparation code, and code that consumes the values from the stack *)
   let set_val_vanilla env ae var : G.t * G.t =
@@ -8981,10 +9220,8 @@ module Var = struct
       sr, G.i (LocalGet (nr i))
     | Some (HeapInd i) ->
       SR.Vanilla, G.i (LocalGet (nr i)) ^^ MutBox.load_field env
-    | Some (Static index) ->
-      SR.Vanilla, 
-      Heap.get_static_variable env index ^^
-      MutBox.load_field env
+    | Some (HeapStatic i) ->
+      SR.Vanilla, compile_unboxed_const i ^^ MutBox.load_field env
     | Some (Const c) ->
       SR.Const c, G.nop
     | Some (PublicMethod (_, name)) ->
@@ -9025,12 +9262,12 @@ module Var = struct
      In the IR, mutable fields of objects are pre-allocated as MutBox objects,
      to allow the async/await.
      So we expect the variable to be in a HeapInd (pointer to MutBox on the heap),
-     or Static (static variable represented as a MutBox that is accessed via the 
-     runtime system) and we use the pointer.
+     or HeapStatic (statically known MutBox in the static memory) and we use
+     the pointer.
   *)
   let get_aliased_box env ae var = match VarEnv.lookup_var ae var with
     | Some (HeapInd i) -> G.i (LocalGet (nr i))
-    | Some (Static index) -> Heap.get_static_variable env index
+    | Some (HeapStatic i) -> compile_unboxed_const i
     | _ -> assert false
 
   let capture_aliased_box env ae var = match VarEnv.lookup_var ae var with
@@ -9047,7 +9284,7 @@ end (* Var *)
 module Internals = struct
   let call_prelude_function env ae var =
     match VarEnv.lookup_var ae var with
-    | Some (VarEnv.Const Const.Fun (_, mk_fi, _)) ->
+    | Some (VarEnv.Const (_, Const.Fun (mk_fi, _))) ->
        compile_unboxed_zero ^^ (* A dummy closure *)
        G.i (Call (nr (mk_fi ())))
     | _ -> assert false
@@ -9154,14 +9391,13 @@ module FuncDec = struct
     if Type.is_shared_sort sort
     then begin
       let (fi, fill) = E.reserve_fun pre_env name in
-      ( Const.Message fi, fun env ae ->
+      ( Const.t_of_v (Const.Message fi), fun env ae ->
         fill (compile_const_message env ae sort control args mk_body ret_tys at)
       )
     end else begin
       assert (control = Type.Returns);
       let lf = E.make_lazy_function pre_env name in
-      let fun_id = E.get_constant_function_id pre_env in
-      ( Const.Fun (fun_id, (fun () -> Lib.AllocOnUse.use lf), fun_rhs), fun env ae ->
+      ( Const.t_of_v (Const.Fun ((fun () -> Lib.AllocOnUse.use lf), fun_rhs)), fun env ae ->
         let restore_no_env _env ae _ = ae, unmodified in
         Lib.AllocOnUse.def lf (lazy (compile_local_function env ae restore_no_env args mk_body ret_tys at))
       )
@@ -9206,18 +9442,18 @@ module FuncDec = struct
 
       let code =
         (* Allocate a heap object for the closure *)
-        Tagged.alloc env (Int32.add Closure.header_size len) Tagged.Closure ^^
+        Tagged.alloc env (Int32.add (Closure.header_size env) len) Tagged.Closure ^^
         set_clos ^^
 
         (* Store the function pointer number: *)
         get_clos ^^
         compile_unboxed_const (E.add_fun_ptr env fi) ^^
-        Tagged.store_field env Closure.funptr_field ^^
+        Tagged.store_field env (Closure.funptr_field env) ^^
 
         (* Store the length *)
         get_clos ^^
         compile_unboxed_const len ^^
-        Tagged.store_field env Closure.len_field ^^
+        Tagged.store_field env (Closure.len_field env) ^^
 
         (* Store all captured values *)
         store_env ^^
@@ -9536,7 +9772,7 @@ module FuncDec = struct
         *)
         (* Instead, just ignore the argument and
            send a *statically* allocated, nullary reply *)
-        Blob.lit_ptr_len env Tagged.B "DIDL\x00\x00" ^^
+        Blob.lit_ptr_len env "DIDL\x00\x00" ^^
         IC.reply_with_data env ^^
         (* Finally, act like
         message_cleanup env (Type.Shared Type.Write)
@@ -9760,7 +9996,7 @@ module AllocHow = struct
   let how_of_ae ae : allocHow =
     M.map (fun (l, _) -> match l with
     | VarEnv.Const _        -> (Const : how)
-    | VarEnv.Static _       -> StoreStatic
+    | VarEnv.HeapStatic _   -> StoreStatic
     | VarEnv.HeapInd _      -> StoreHeap
     | VarEnv.Local (sr, _)  -> LocalMut sr (* conservatively assume mutable *)
     | VarEnv.PublicMethod _ -> LocalMut SR.Vanilla
@@ -9794,8 +10030,8 @@ module AllocHow = struct
       let alloc_code = MutBox.alloc env ^^ G.i (LocalSet (nr i)) in
       (ae1, alloc_code)
     | StoreStatic ->
-      let index = MutBox.add_global_mutbox env in
-      let ae1 = VarEnv.add_static_variable ae name index typ in
+      let ptr = MutBox.static env in
+      let ae1 = VarEnv.add_local_heap_static ae name ptr typ in
       (ae1, G.nop)
 
   let add_local_for_alias env ae how name typ : VarEnv.t * G.t =
@@ -9843,7 +10079,7 @@ let const_lit_of_lit : Ir.lit -> Const.lit = function
   | FloatLit f    -> Const.Float64 f
 
 let const_of_lit lit =
-  Const.Lit (const_lit_of_lit lit)
+  Const.t_of_v (Const.Lit (const_lit_of_lit lit))
 
 let compile_lit lit =
   SR.Const (const_of_lit lit), G.nop
@@ -10463,25 +10699,46 @@ let compile_load_field env typ name =
 *)
 let rec compile_lexp (env : E.t) ae lexp : G.t * SR.t * G.t =
   (fun (code, sr, fill_code) -> G.(with_region lexp.at code, sr, with_region lexp.at fill_code)) @@
-  match lexp.it with
-  | VarLE var -> Var.set_val env ae var
-  | IdxLE (e1, e2) when potential_pointer (Arr.element_type env e1.note.Note.typ) ->
+  match lexp.it, !Flags.gc_strategy with
+  | VarLE var, _ -> Var.set_val env ae var
+  | IdxLE (e1, e2), Flags.Generational when potential_pointer (Arr.element_type env e1.note.Note.typ) ->
+    let (set_field, get_field) = new_local env "field" in
+    compile_array_index env ae e1 e2 ^^
+    set_field ^^ (* peepholes to tee *)
+    get_field,
+    SR.Vanilla,
+    store_ptr ^^
+    get_field ^^
+    compile_add_const ptr_unskew ^^
+    E.call_import env "rts" "post_write_barrier"
+  | IdxLE (e1, e2), Flags.Incremental when potential_pointer (Arr.element_type env e1.note.Note.typ) ->
     compile_array_index env ae e1 e2 ^^
     compile_add_const ptr_unskew,
     SR.Vanilla,
     Tagged.write_with_barrier env
-  | IdxLE (e1, e2) ->
+  | IdxLE (e1, e2), _ ->
     compile_array_index env ae e1 e2,
     SR.Vanilla,
     store_ptr
-  | DotLE (e, n) when potential_pointer (Object.field_type env e.note.Note.typ n) ->
+  | DotLE (e, n), Flags.Generational when potential_pointer (Object.field_type env e.note.Note.typ n) ->
+    let (set_field, get_field) = new_local env "field" in
+    compile_exp_vanilla env ae e ^^
+    Object.idx env e.note.Note.typ n ^^
+    set_field ^^ (* peepholes to tee *)
+    get_field,
+    SR.Vanilla,
+    store_ptr ^^
+    get_field ^^
+    compile_add_const ptr_unskew ^^
+    E.call_import env "rts" "post_write_barrier"
+  | DotLE (e, n), Flags.Incremental when potential_pointer (Object.field_type env e.note.Note.typ n) ->
     compile_exp_vanilla env ae e ^^
     (* Only real objects have mutable fields, no need to branch on the tag *)
     Object.idx env e.note.Note.typ n ^^
     compile_add_const ptr_unskew,
     SR.Vanilla,
     Tagged.write_with_barrier env
-  | DotLE (e, n) ->
+  | DotLE (e, n), _ ->
     compile_exp_vanilla env ae e ^^
     (* Only real objects have mutable fields, no need to branch on the tag *)
     Object.idx env e.note.Note.typ n,
@@ -10514,7 +10771,7 @@ and compile_prim_invocation (env : E.t) ae p es at =
 
     (* we duplicate this pattern match to emulate pattern guards *)
     let call_as_prim = match fun_sr, sort with
-      | SR.Const Const.Fun (_, mk_fi, Const.PrimWrapper prim), _ ->
+      | SR.Const (_, Const.Fun (mk_fi, Const.PrimWrapper prim)), _ ->
          begin match n_args, e2.it with
          | 0, _ -> true
          | 1, _ -> true
@@ -10524,7 +10781,7 @@ and compile_prim_invocation (env : E.t) ae p es at =
       | _ -> false in
 
     begin match fun_sr, sort with
-      | SR.Const Const.Fun (_, mk_fi, Const.PrimWrapper prim), _ when call_as_prim ->
+      | SR.Const (_, Const.Fun (mk_fi, Const.PrimWrapper prim)), _ when call_as_prim ->
          assert (sort = Type.Local);
          (* Handle argument tuples *)
          begin match n_args, e2.it with
@@ -10543,7 +10800,7 @@ and compile_prim_invocation (env : E.t) ae p es at =
            (* ugly case; let's just call this as a function for now *)
            raise (Invalid_argument "call_as_prim was true?")
          end
-      | SR.Const Const.Fun (_, mk_fi, _), _ ->
+      | SR.Const (_, Const.Fun (mk_fi, _)), _ ->
          assert (sort = Type.Local);
          StackRep.of_arity return_arity,
 
@@ -10624,7 +10881,7 @@ and compile_prim_invocation (env : E.t) ae p es at =
   | DotPrim name, [e] ->
     let sr, code1 = compile_exp env ae e in
     begin match sr with
-    | SR.Const Const.Obj fs ->
+    | SR.Const (_, Const.Obj fs) ->
       let c = List.assoc name fs in
       SR.Const c, code1
     | _ ->
@@ -10672,9 +10929,9 @@ and compile_prim_invocation (env : E.t) ae p es at =
        fields, effectively arriving at the desired element *)
     G.i (Binary (Wasm.Values.I32 I32Op.Add)) ^^
     (* Not using Tagged.load_field since it is not a proper pointer to the array start *)
-    Heap.load_field Arr.header_size (* loads the element at the byte offset *)
+    Heap.load_field (Arr.header_size env) (* loads the element at the byte offset *)
   | GetLastArrayOffset, [e] ->
-    assert (BitTagged.can_tag_const Type.Int (Int64.of_int32 (Int32.sub Arr.max_array_size 1l)));
+    assert (BitTagged.can_tag_const Type.Int (Int64.of_int32 (Int32.sub (Arr.max_array_size env) 1l)));
     SR.Vanilla,
     compile_exp_vanilla env ae e ^^ (* array *)
     Arr.len env ^^
@@ -11006,8 +11263,14 @@ and compile_prim_invocation (env : E.t) ae p es at =
     GC.collect_garbage env
 
   | ICStableSize t, [e] ->
-    SR.UnboxedWord64 Type.Nat64,
-    E.trap_with env "Deprecated with enhanced orthogonal persistence"
+    SR.UnboxedWord64  Type.Nat64,
+    let (tydesc, _, _) = Serialization.type_desc env [t] in
+    let tydesc_len = Int32.of_int (String.length tydesc) in
+    compile_exp_vanilla env ae e ^^
+    Serialization.buffer_size env t ^^
+    G.i Drop ^^
+    compile_add_const tydesc_len ^^
+    G.i (Convert (Wasm.Values.I64 I64Op.ExtendUI32))
 
   (* Other prims, unary *)
 
@@ -11230,10 +11493,6 @@ and compile_prim_invocation (env : E.t) ae p es at =
     SR.Vanilla,
     GC.get_collector_instructions env ^^ BigNum.from_word64 env
 
-  | OtherPrim "rts_upgrade_instructions", [] ->
-    SR.Vanilla,
-    UpgradeStatistics.get_upgrade_instructions env ^^ BigNum.from_word64 env
-
   | OtherPrim "rts_stable_memory_size", [] ->
     SR.Vanilla,
     StableMem.stable64_size env ^^ BigNum.from_word64 env
@@ -11270,7 +11529,8 @@ and compile_prim_invocation (env : E.t) ae p es at =
     compile_exp_as env ae SR.Vanilla e0 ^^
     compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e1 ^^
     compile_exp_as env ae SR.Vanilla e2 ^^
-    BigNum.to_word32_with env (Blob.lit env Tagged.T "Blob size out of bounds") ^^
+    Blob.lit env Tagged.T "Blob size out of bounds" ^^
+    BigNum.to_word32_with env ^^
     Region.load_blob env
 
   | OtherPrim ("regionStoreBlob"), [e0; e1; e2] ->
@@ -11616,7 +11876,8 @@ and compile_prim_invocation (env : E.t) ae p es at =
     SR.Vanilla,
     compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e1 ^^
     compile_exp_as env ae SR.Vanilla e2 ^^
-    BigNum.to_word32_with env (Blob.lit env Tagged.T "Blob size out of bounds") ^^
+    Blob.lit env Tagged.T "Blob size out of bounds" ^^
+    BigNum.to_word32_with env ^^
     StableMemoryInterface.load_blob env
 
   | OtherPrim "stableMemoryStoreBlob", [e1; e2] ->
@@ -11777,12 +12038,23 @@ and compile_prim_invocation (env : E.t) ae p es at =
     SR.Vanilla, IC.method_name env
 
   | ICStableRead ty, [] ->
+    (*
+      * On initial install:
+        1. return record of nulls
+      * On upgrade:
+        1. deserialize stable store to v : ty,
+        2. possibly run region manager initialization logic.
+        3. return v
+    *)
     SR.Vanilla,
-    Persistence.load env ty
-  | ICStableWrite ty, [e] ->
+    Stabilization.destabilize env ty (StableMem.set_version env) ^^
+    compile_unboxed_const (if !Flags.use_stable_regions then 1l else 0l) ^^
+    E.call_import env "rts" "region_init"
+
+  | ICStableWrite ty, [] ->
     SR.unit,
-    compile_exp_vanilla env ae e ^^
-    Persistence.save env ty
+    IC.get_actor_to_persist env ^^
+    Stabilization.stabilize env ty
 
   (* Cycles *)
   | SystemCyclesBalancePrim, [] ->
@@ -12087,7 +12359,8 @@ enabled mutual recursion.
 and compile_lit_pat env l =
   match l with
   | NullLit ->
-    Opt.is_null env
+    compile_lit_as env SR.Vanilla l ^^
+    G.i (Compare (Wasm.Values.I32 I32Op.Eq))
   | BoolLit true ->
     G.nop
   | BoolLit false ->
@@ -12292,7 +12565,7 @@ and compile_dec env pre_ae how v2en dec : VarEnv.t * G.t * (VarEnv.t -> scope_wr
   | LetD ({it = VarP v; _}, e) when E.NameEnv.mem v v2en ->
     let (const, fill) = compile_const_exp env pre_ae e in
     let fi = match const with
-      | Const.Message fi -> fi
+      | (_, Const.Message fi) -> fi
       | _ -> assert false in
     let pre_ae1 = VarEnv.add_local_public_method pre_ae v (fi, (E.NameEnv.find v v2en)) e.note.Note.typ in
     G.( pre_ae1, nop, (fun ae -> fill env ae; nop), unmodified)
@@ -12362,7 +12635,7 @@ and compile_decs env ae decs captured_in_body : VarEnv.t * scope_wrap =
 (* This compiles expressions determined to be const as per the analysis in
    ir_passes/const.ml. See there for more details.
 *)
-and compile_const_exp env pre_ae exp : Const.v * (E.t -> VarEnv.t -> unit) =
+and compile_const_exp env pre_ae exp : Const.t * (E.t -> VarEnv.t -> unit) =
   match exp.it with
   | FuncE (name, sort, control, typ_binds, args, res_tys, e) ->
     let fun_rhs =
@@ -12418,37 +12691,37 @@ and compile_const_exp env pre_ae exp : Const.v * (E.t -> VarEnv.t -> unit) =
             | _ -> fatal "compile_const_exp/ObjE: \"%s\" not found" f.it.var
           in f.it.name, st) fs
     in
-    (Const.Obj static_fs), fun _ _ -> ()
+    (Const.t_of_v (Const.Obj static_fs), fun _ _ -> ())
   | PrimE (DotPrim name, [e]) ->
     let (object_ct, fill) = compile_const_exp env pre_ae e in
     let fs = match object_ct with
-      | Const.Obj fs -> fs
+      | _, Const.Obj fs -> fs
       | _ -> fatal "compile_const_exp/DotE: not a static object" in
     let member_ct = List.assoc name fs in
     (member_ct, fill)
   | PrimE (ProjPrim i, [e]) ->
     let (object_ct, fill) = compile_const_exp env pre_ae e in
     let cs = match object_ct with
-      | Const.Tuple cs -> cs
+      | _, Const.Tuple cs -> cs
       | _ -> fatal "compile_const_exp/ProjE: not a static tuple" in
     (List.nth cs i, fill)
-  | LitE l -> Const.(Lit (const_lit_of_lit l)), (fun _ _ -> ())
-  | PrimE (TupPrim, []) -> Const.Unit, (fun _ _ -> ())
+  | LitE l -> Const.(t_of_v (Lit (const_lit_of_lit l))), (fun _ _ -> ())
+  | PrimE (TupPrim, []) -> Const.t_of_v Const.Unit, (fun _ _ -> ())
   | PrimE (ArrayPrim (Const, _), es) ->
     let (cs, fills) = List.split (List.map (compile_const_exp env pre_ae) es) in
-    (Const.Array cs),
+    Const.(t_of_v (Array cs)),
     (fun env ae -> List.iter (fun fill -> fill env ae) fills)
   | PrimE (TupPrim, es) ->
     let (cs, fills) = List.split (List.map (compile_const_exp env pre_ae) es) in
-    (Const.Tuple cs),
+    Const.(t_of_v (Tuple cs)),
     (fun env ae -> List.iter (fun fill -> fill env ae) fills)
   | PrimE (TagPrim i, [e]) ->
     let (arg_ct, fill) = compile_const_exp env pre_ae e in
-    (Const.Tag (i, arg_ct)),
+    Const.(t_of_v (Tag (i, arg_ct))),
     fill
   | PrimE (OptPrim, [e]) ->
     let (arg_ct, fill) = compile_const_exp env pre_ae e in
-    (Const.Opt arg_ct),
+    Const.(t_of_v (Opt arg_ct)),
     fill
 
   | _ -> assert false
@@ -12474,7 +12747,7 @@ and destruct_const_pat ae pat const : VarEnv.t option = match pat.it with
   | WildP -> Some ae
   | VarP v -> Some (VarEnv.add_local_const ae v const pat.note)
   | ObjP pfs ->
-    let fs = match const with Const.Obj fs -> fs | _ -> assert false in
+    let fs = match const with (_, Const.Obj fs) -> fs | _ -> assert false in
     List.fold_left (fun ae (pf : pat_field) ->
       match ae, List.find_opt (fun (n, _) -> pf.it.name = n) fs with
       | None, _ -> None
@@ -12486,26 +12759,26 @@ and destruct_const_pat ae pat const : VarEnv.t option = match pat.it with
     if l = None then destruct_const_pat ae p2 const
     else l
   | TupP ps ->
-    let cs = match const with Const.Tuple cs -> cs | Const.Unit -> [] | _ -> assert false in
+    let cs = match const with (_, Const.Tuple cs) -> cs | (_, Const.Unit) -> [] | _ -> assert false in
     let go ae p c = match ae with
       | Some ae -> destruct_const_pat ae p c
       | _ -> None in
     List.fold_left2 go (Some ae) ps cs
   | LitP lp ->
     begin match const with
-    | Const.Lit lc when Const.lit_eq (const_lit_of_lit lp) lc -> Some ae
+    | (_, Const.Lit lc) when Const.lit_eq (const_lit_of_lit lp, lc) -> Some ae
     | _ -> None
     end
   | OptP p ->
     begin match const with
-      | Const.Opt c -> destruct_const_pat ae p c
-      | Const.(Lit Null) -> None
+      | (_, Const.Opt c) -> destruct_const_pat ae p c
+      | (_, Const.(Lit Null)) -> None
       | _ -> assert false
     end
   | TagP (i, p) ->
      match const with
-     | Const.Tag (ic, c) when i = ic -> destruct_const_pat ae p c
-     | Const.Tag _ -> None
+     | (_, Const.Tag (ic, c)) when i = ic -> destruct_const_pat ae p c
+     | (_, Const.Tag _) -> None
      | _ -> assert false
 
 and compile_const_dec env pre_ae dec : (VarEnv.t -> VarEnv.t) * (E.t -> VarEnv.t -> unit) =
@@ -12534,7 +12807,7 @@ and compile_init_func mod_env ((cu, flavor) : Ir.prog) =
       let _ae, codeW = compile_decs env VarEnv.empty_ae ds Freevars.S.empty in
       codeW G.nop
     )
-  | ActorU (as_opt, ds, fs, up, _t) ->
+  | ActorU (as_opt, ds, fs, up, t) ->
     main_actor as_opt mod_env ds fs up
 
 and export_actor_field env  ae (f : Ir.field) =
@@ -12562,6 +12835,7 @@ and export_actor_field env  ae (f : Ir.field) =
 
 (* Main actor *)
 and main_actor as_opt mod_env ds fs up =
+  let build_stable_actor = up.stable_record in
   Func.define_built_in mod_env "init" [] [] (fun env ->
     let ae0 = VarEnv.empty_ae in
 
@@ -12620,6 +12894,11 @@ and main_actor as_opt mod_env ds fs up =
        IC.export_inspect env;
     end;
 
+    (* Helper function to build the stable actor wrapper *)
+    Func.define_built_in mod_env IC.get_actor_to_persist_function_name [] [I32Type] (fun env ->
+      compile_exp_as env ae2 SR.Vanilla build_stable_actor
+    );
+
     (* Export metadata *)
     env.E.stable_types := metadata "motoko:stable-types" up.meta.sig_;
     env.E.service := metadata "candid:service" up.meta.candid.service;
@@ -12665,7 +12944,9 @@ and conclude_module env set_serialization_globals start_fi_o =
   FuncDec.export_gc_trigger_method env;
 
   (* See Note [Candid subtype checks] *)
-  Serialization.create_global_type_descriptor env set_serialization_globals;
+  Serialization.set_delayed_globals env set_serialization_globals;
+
+  let static_roots = GCRoots.store_static_roots env in
 
   (* declare before building GC *)
 
@@ -12675,15 +12956,14 @@ and conclude_module env set_serialization_globals start_fi_o =
   E.export_global env "__heap_base";
 
   Heap.register env;
+  GCRoots.register env static_roots;
   IC.register env;
 
-  let dynamic_heap_start = Lifecycle.end_ () in
-  set_heap_base dynamic_heap_start;
+  set_heap_base (E.get_end_of_static_memory env);
 
   (* Wrap the start function with the RTS initialization *)
   let rts_start_fi = E.add_fun env "rts_start" (Func.of_body env [] [] (fun env1 ->
-    E.call_import env "rts" ("initialize_incremental_gc") ^^
-    GCRoots.register_static_variables env ^^
+    E.call_import env "rts" ("initialize_" ^ E.gc_strategy_name !Flags.gc_strategy ^ "_gc") ^^
     match start_fi_o with
     | Some fi ->
       G.i (Call fi)
@@ -12699,15 +12979,17 @@ and conclude_module env set_serialization_globals start_fi_o =
 
   let other_imports = E.get_other_imports env in
 
-  let initial_memory_pages = Int32.(add (div dynamic_heap_start page_size) 1l) in
-  let memories = E.get_memories env initial_memory_pages in
+  let memories = E.get_memories env in
 
   let funcs = E.get_funcs env in
 
-  let datas = List.map (fun (dinit) -> nr {
+  let datas = List.map (fun (offset, dinit) -> nr {
     dinit;
-    dmode = (nr Wasm_exts.Ast.Passive);
-    }) (E.get_data_segments env) in
+    dmode = (nr (Wasm_exts.Ast.Active {
+        index = nr 0l;
+        offset = nr (G.to_instr_list (compile_unboxed_const offset));
+      }));
+    }) (E.get_static_memory env) in
 
   let elems = List.map (fun (fi, fp) -> nr {
     index = nr 0l;
@@ -12741,9 +13023,9 @@ and conclude_module env set_serialization_globals start_fi_o =
       motoko = {
         labels = E.get_labs env;
         stable_types = !(env.E.stable_types);
-        compiler = metadata "motoko:compiler" (Lib.Option.get Source_id.release Source_id.id);
+        compiler = metadata "motoko:compiler" (Lib.Option.get Source_id.release Source_id.id)
       };
-      enhanced_orthogonal_persistence = Some (false, "32-bit, layout version 1");
+      enhanced_orthogonal_persistence = None;
       candid = {
         args = !(env.E.args);
         service = !(env.E.service);
@@ -12757,8 +13039,7 @@ and conclude_module env set_serialization_globals start_fi_o =
   | Some rts -> Linking.LinkModule.link emodule "rts" rts
 
 let compile mode rts (prog : Ir.prog) : Wasm_exts.CustomModule.extended_module =
-  assert !Flags.rtti; (* orthogonal persistence requires a fixed layout. *)
-  let env = E.mk_global mode rts IC.trap_with in
+  let env = E.mk_global mode rts IC.trap_with (Lifecycle.end_ ()) in
 
   IC.register_globals env;
   Stack.register_globals env;
@@ -12769,8 +13050,7 @@ let compile mode rts (prog : Ir.prog) : Wasm_exts.CustomModule.extended_module =
 
   (* See Note [Candid subtype checks] *)
   let set_serialization_globals = Serialization.register_delayed_globals env in
-  Serialization.reserve_global_type_descriptor env;
-  
+
   IC.system_imports env;
   RTS.system_imports env;
 
diff --git a/src/codegen/compile.mli b/src/codegen/compile_classical.mli
similarity index 100%
rename from src/codegen/compile.mli
rename to src/codegen/compile_classical.mli
diff --git a/src/codegen/compile_enhanced.ml b/src/codegen/compile_enhanced.ml
new file mode 100644
index 00000000000..080f1acfb7d
--- /dev/null
+++ b/src/codegen/compile_enhanced.ml
@@ -0,0 +1,13273 @@
+(*
+This module is the backend of the Motoko compiler. It takes a program in
+the intermediate representation (ir.ml), and produces a WebAssembly module,
+with Internet Computer extensions (customModule.ml). An important helper module is
+instrList.ml, which provides a more convenient way of assembling WebAssembly
+instruction lists, as it takes care of (1) source locations and (2) labels.
+
+This file is split up in a number of modules, purely for namespacing and
+grouping. Every module has a high-level prose comment explaining the concept;
+this keeps documentation close to the code (a lesson learned from Simon PJ).
+*)
+
+open Ir_def
+open Mo_values
+open Mo_types
+open Mo_config
+
+open Wasm_exts.Ast
+open Wasm_exts.Types
+open Source
+
+(* Re-shadow Source.(@@), to get Stdlib.(@@) *)
+let (@@) = Stdlib.(@@)
+
+module G = InstrList
+let (^^) = G.(^^) (* is this how we import a single operator from a module that we otherwise use qualified? *)
+
+(* WebAssembly pages are 64kb. *)
+let page_size = Int64.of_int (64 * 1024)
+let page_size_bits = 16
+
+(* Our code depends on OCaml int having at least 32 bits *)
+let _ = assert (Sys.int_size >= 32)
+
+(* Scalar Tagging Scheme *)
+
+(* Rationale:
+   Scalar tags are variable length LSBs.
+   A tag (in binary) is either
+   * 10 for Int (leaving 30 bits payload)
+   * 01(0+)0 for unsigned, e.g 0100 for Nat64
+   * 11(0+)0 for signed,   e.g.1100 for Int64
+   Types must be distinguished by tag.
+   LSB must always be 0.
+   Decoding the type of scalar is easy using `ctz` to count the trailing zeros, then
+   switching on the MSB of the tag for sign (if any).
+   We use the *longest* tag that accommodates the required payload bits, to allow room
+   for any future tags that may require more payload bits,
+   e.g. 01(0^14) for Nat8, 11(0^14) for Int8
+   01(0^30) is used for the unit tag (the payload is a trivial zero-length bit string).
+*)
+
+module TaggingScheme = struct
+
+  (*
+     Enable for development only to sanity check value tags and
+     locate unexpected tag errors to compile.ml source lines.
+     Flags.sanity_check will check tags, but not further locate them.
+  *)
+
+  let debug = false (* should never be true in master! *)
+
+  type bit = I | O
+  let _ = (I,O) (* silence warning on unused constructors *)
+
+  type _tag =
+    TBool
+  | TRef
+  | TNum
+  | TNat64 | TInt64
+  | TNat32 | TInt32
+  | TChar
+  | TNat8 | TInt8
+  | TNat16 | TInt16
+  | TUnit
+  | TUnused
+
+  (* Leverage OCaml pattern match compilation to check tagging scheme is injective *)
+  (* OCaml generates stack overflow for _decode:
+  let _decode u64 =
+    match u64 with
+    | ((O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O)) -> TBool (* false *)
+    | ((O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,I)) -> TBool (* true *)
+    | ((_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (_,_,_,_,_,_,I,I)) -> TRef  (* 62 bit *)
+    | ((_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (_,_,_,_,_,_,I,O)) -> TNum  (* 62 bit *)
+    | ((_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (_,_,_,_,O,I,O,O)) -> TNat64 (* 60 bit *)
+    | ((_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (_,_,_,_,I,I,O,O)) -> TInt64
+    | ((_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (O,I,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O)) -> TNat32
+    | ((_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (I,I,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O)) -> TInt32
+    | ((_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (_,_,_,_,_,O,I,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O)) -> TChar
+    | ((_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (O,I,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O)) -> TNat16
+    | ((_,_,_,_,_,_,_,_), (_,_,_,_,_,_,_,_), (I,I,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O)) -> TInt16
+    | ((_,_,_,_,_,_,_,_), (O,I,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O)) -> TNat8
+    | ((_,_,_,_,_,_,_,_), (I,I,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O)) -> TInt8
+    | ((O,I,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O), (O,O,O,O,O,O,O,O)) -> TUnit
+    | _                                                                                                                                                        -> TUnused
+  *)
+
+  let tag_of_typ pty = Type.(
+  if !Flags.rtti then
+    match pty with
+    | Nat
+    | Int ->                                                                        0b10L
+    | Nat64 ->                                                                    0b0100L
+    | Int64 ->                                                                    0b1100L
+    | Nat32 ->                                     0b01000000_00000000_00000000_00000000L
+    | Int32 ->                                     0b11000000_00000000_00000000_00000000L
+    | Char  ->                        0b010_00000000_00000000_00000000_00000000_00000000L
+    | Nat16 ->                   0b01000000_00000000_00000000_00000000_00000000_00000000L
+    | Int16 ->                   0b11000000_00000000_00000000_00000000_00000000_00000000L
+    | Nat8  ->          0b01000000_00000000_00000000_00000000_00000000_00000000_00000000L
+    | Int8  ->          0b11000000_00000000_00000000_00000000_00000000_00000000_00000000L
+    | _  -> assert false
+  else
+    (* no tag *)
+    match pty with
+    | Nat
+    | Int
+    | Nat64
+    | Int64
+    | Nat32
+    | Int32
+    | Char
+    | Nat16
+    | Int16
+    | Nat8
+    | Int8 -> 0L
+    | _  -> assert false)
+
+  let unit_tag = 
+    if !Flags.rtti then
+      (* all tag, no payload (none needed) *)
+      0b01000000_00000000_00000000_00000000_00000000_00000000_00000000_00000000L
+    else
+      (* no tag *)
+      0L
+
+  (* Number of payload bits in compact representation, including any sign *)
+  let ubits_of pty = Type.(
+    if !Flags.rtti then
+      match pty with
+      | Nat | Int     -> 62
+      | Nat64 | Int64 -> 60
+      | Nat32 | Int32 -> 32
+      | Char          -> 21 (* suffices for 21-bit UTF8 codepoints *)
+      | Nat16 | Int16 -> 16
+      | Nat8  | Int8  ->  8
+      | _ -> assert false
+    else
+      match pty with
+      | Nat   | Int   -> 63
+      | Nat64 | Int64 -> 63
+      | Nat32 | Int32 -> 32
+      | Char          -> 21 (* suffices for 21-bit UTF8 codepoints *)
+      | Nat16 | Int16 -> 16
+      | Nat8  | Int8  ->  8
+      | _ -> assert false)
+
+end
+
+(*
+Pointers are skewed (translated) -1 relative to the actual offset.
+See documentation of module BitTagged for more detail.
+*)
+let ptr_skew = -1L
+
+let ptr_unskew = 1L
+
+(* Generating function names for functions parametrized by prim types *)
+let prim_fun_name p stem = Printf.sprintf "%s<%s>" stem (Type.string_of_prim p)
+
+(* Helper functions to produce annotated terms (Wasm.AST) *)
+let nr x = Wasm.Source.{ it = x; at = no_region }
+
+let todo fn se x = Printf.eprintf "%s: %s" fn (Wasm.Sexpr.to_string 80 se); x
+
+exception CodegenError of string
+let fatal fmt = Printf.ksprintf (fun s -> raise (CodegenError s)) fmt
+
+module StaticBytes = struct
+  (* A very simple DSL to describe static memory *)
+
+  type t_ =
+    | I32 of int32
+    | I64 of int64
+    | Seq of t
+    | Bytes of string
+
+  and t = t_ list
+
+  let i64s is = Seq (List.map (fun i -> I64 i) is)
+
+  let rec add : Buffer.t -> t_ -> unit = fun buf -> function
+    | I32 i -> Buffer.add_int32_le buf i
+    | I64 i -> Buffer.add_int64_le buf i
+    | Seq xs -> List.iter (add buf) xs
+    | Bytes b -> Buffer.add_string buf b
+
+  let as_bytes : t -> string = fun xs ->
+    let buf = Buffer.create 32 in
+    List.iter (add buf) xs;
+    Buffer.contents buf
+
+  let as_words static_bytes =
+    let rec convert_to_words binary index = 
+      assert (index <= (Bytes.length binary));
+      if (Bytes.length binary) = index then 
+        []
+      else 
+        let number = Bytes.get_int64_le binary index in
+        let next_index = Int.add index 8 in
+        [number] @ (convert_to_words binary next_index)
+    in
+    convert_to_words (Bytes.of_string (as_bytes static_bytes)) 0
+
+end (* StaticBytes *)
+
+module Const = struct
+
+  (* Literals, as used in constant values. This is a projection of Ir.Lit,
+     combining cases whose details we no longer care about.
+     Should be still precise enough to map to the cases supported by SR.t.
+
+     In other words: It is the smallest type that allows these three functions:
+
+       (* projection of Ir.list. NB: pure, no access to env *)
+       const_lit_of_lit : Ir.lit -> Const.lit (* NB: pure, no access to env *)
+
+       (* creates vanilla representation (e.g. to put in static data structures *)
+       vanilla_lit : E.env -> Const.lit -> i64
+
+       (* creates efficient stack representation *)
+       compile_lit : E.env -> Const.lit -> (SR.t, code)
+
+  *)
+
+  type lit =
+    | Vanilla of int64 (* small words, no static data, already in vanilla format *)
+    | BigInt of Big_int.big_int
+    | Bool of bool
+    | Word64 of Type.prim * int64
+    | Float64 of Numerics.Float.t
+    | Text of string
+    | Blob of string
+    | Null
+
+  let lit_eq l1 l2 = match l1, l2 with
+    | Vanilla i, Vanilla j -> i = j
+    | BigInt i, BigInt j -> Big_int.eq_big_int i j
+    | Word64 (tyi, i), Word64 (tyj, j) -> tyi = tyj && i = j
+    | Float64 i, Float64 j -> i = j
+    | Bool i, Bool j -> i = j
+    | Text s, Text t
+    | Blob s, Blob t -> s = t
+    | Null, Null -> true
+    | _ -> false
+
+  (* Inlineable functions
+
+     The prelude/prim.mo is full of functions simply wrapping a prim, e.g.
+
+        func int64ToNat64(n : Int64) : Nat64 = (prim "num_wrap_Int64_Nat64" : Int64 -> Nat64) n;
+
+     generating a Wasm function for them and calling them is absurdly expensive
+     when the prim is just a simple Wasm instruction. Also, it requires boxing
+     and unboxing arguments and results.
+
+     So we recognize such functions when creating the `const` summary, and use the prim
+     directly when calling such function.
+
+     Can be extended to cover more forms of inlineable functions.
+  *)
+  type fun_rhs =
+    | Complicated (* no inlining possible *)
+    | PrimWrapper of Ir.prim
+
+  (* Constant known values.
+
+     These are values that
+     * are completely known constantly
+     * do not require Wasm code to be executed (e.g. in `start`)
+     * can be used directly (e.g. Call, not CallIndirect)
+     * can be turned into Vanilla heap data on demand
+
+     See ir_passes/const.ml for what precisely we can compile as const now.
+  *)
+
+  type v =
+    | Fun of int32 * (unit -> int32) * fun_rhs (* function pointer calculated upon first use *)
+    | Message of int32 (* anonymous message, only temporary *)
+    | Obj of (string * v) list
+    | Unit
+    | Array of v list (* immutable arrays *)
+    | Tuple of v list (* non-nullary tuples *)
+    | Tag of (string * v)
+    | Opt of v
+    | Lit of lit
+
+  let rec eq v1 v2 = match v1, v2 with
+    | Fun (id1, _, _), Fun (id2, _, _) -> id1 = id2
+    | Message fi1, Message fi2 -> fi1 = fi2
+    | Obj fields1, Obj fields2 ->
+      let equal_fields (name1, field_value1) (name2, field_value2) = (name1 = name2) && (eq field_value1 field_value2) in
+      List.for_all2 equal_fields fields1 fields2
+    | Unit, Unit -> true
+    | Array elements1, Array elements2 ->
+      List.for_all2 eq elements1 elements2
+    | Tuple elements1, Tuple elements2 ->
+      List.for_all2 eq elements1 elements2
+    | Tag (name1, tag_value1), Tag (name2, tag_value2) ->
+      (name1 = name2) && (eq tag_value1 tag_value2)
+    | Opt opt_value1, Opt opt_value2 -> eq opt_value1 opt_value2
+    | Lit l1, Lit l2 -> lit_eq l1 l2
+    | Fun _, _ | Message _, _ | Obj _, _ | Unit, _ 
+    | Array _, _ | Tuple _, _ | Tag _, _ | Opt _, _ 
+    | Lit _, _ -> false
+
+end (* Const *)
+
+module SR = struct
+  (* This goes with the StackRep module, but we need the types earlier *)
+
+  (* Value representation on the stack:
+
+     Compiling an expression means putting its value on the stack. But
+     there are various ways of putting a value onto the stack -- unboxed,
+     tupled etc.
+   *)
+  type t =
+    | Vanilla
+    | UnboxedTuple of int
+    | UnboxedWord64 of Type.prim
+    | UnboxedFloat64
+    | Unreachable
+    | Const of Const.v
+
+  let unit = UnboxedTuple 0
+
+  let bool = Vanilla
+
+  (* Because t contains Const.t, and that contains Const.v, and that contains
+     Const.lit, and that contains Big_int, we cannot just use normal `=`. So we
+     have to write our own equality.
+  *)
+  let eq (t1 : t) (t2 : t) = match t1, t2 with
+    | Const c1, Const c2 -> Const.eq c1 c2
+    | _ -> t1 = t2
+
+  let to_var_type : t -> value_type = function
+    | Vanilla -> I64Type
+    | UnboxedWord64 _ -> I64Type
+    | UnboxedFloat64 -> F64Type
+    | UnboxedTuple n -> fatal "to_var_type: UnboxedTuple"
+    | Const _ -> fatal "to_var_type: Const"
+    | Unreachable -> fatal "to_var_type: Unreachable"
+
+end (* SR *)
+
+(*
+
+** The compiler environment.
+
+Of course, as we go through the code we have to track a few things; these are
+put in the compiler environment, type `E.t`. Some fields are valid globally, some
+only make sense locally, i.e. within a single function (but we still put them
+in one big record, for convenience).
+
+The fields fall into the following categories:
+
+ 1. Static global fields. Never change.
+    Example: whether we are compiling with -no-system-api
+
+ 2. Mutable global fields. Change only monotonically.
+    These are used to register things like functions. This should be monotone
+    in the sense that entries are only added, and that the order should not
+    matter in a significant way. In some instances, the list contains futures
+    so that we can reserve and know the _position_ of the thing before we have
+    to actually fill it in.
+
+ 3. Static local fields. Never change within a function.
+    Example: number of parameters and return values
+
+ 4. Mutable local fields. See above
+    Example: Name and type of locals.
+
+**)
+
+(* Before we can define the environment, we need some auxillary types *)
+
+module E = struct
+
+  (* Utilities, internal to E *)
+  let reg (ref : 'a list ref) (x : 'a) : int32 =
+      let i = Wasm.I32.of_int_u (List.length !ref) in
+      ref := !ref @ [ x ];
+      i
+
+  let reserve_promise (ref : 'a Lib.Promise.t list ref) _s : (int32 * ('a -> unit)) =
+      let p = Lib.Promise.make () in (* For debugging with named promises, use s here *)
+      let i = Wasm.I32.of_int_u (List.length !ref) in
+      ref := !ref @ [ p ];
+      (i, Lib.Promise.fulfill p)
+
+
+  (* The environment type *)
+  module NameEnv = Env.Make(String)
+  module StringEnv = Env.Make(String)
+  module LabSet = Set.Make(String)
+  module FeatureSet = Set.Make(String)
+
+  module FunEnv = Env.Make(Int32)
+  type local_names = (int32 * string) list (* For the debug section: Names of locals *)
+  type func_with_names = func * local_names
+  type lazy_function = (int32, func_with_names) Lib.AllocOnUse.t
+  type type_descriptor = {
+    candid_data_segment : int32;
+    type_offsets_segment : int32;
+    idl_types_segment : int32;
+  }
+  (* Object allocation code. *)
+  type object_allocation = t -> G.t
+  (* Pool of shared objects.
+     Alllocated in the dynamic heap on program initialization/upgrade.
+     Identified by the index position in this list and accessed via the runtime system.
+     Registered as GC root set and replaced on program upgrade. 
+  *)
+  and object_pool = {
+    objects: object_allocation list ref;
+    frozen: bool ref;
+  }
+  and t = {
+    (* Global fields *)
+    (* Static *)
+    mode : Flags.compile_mode;
+    rts : Wasm_exts.CustomModule.extended_module option; (* The rts. Re-used when compiling actors *)
+    trap_with : t -> string -> G.t;
+      (* Trap with message; in the env for dependency injection *)
+
+    (* Per module fields (only valid/used inside a module) *)
+    (* Immutable *)
+
+    (* Mutable *)
+    func_types : func_type list ref;
+    func_imports : import list ref;
+    other_imports : import list ref;
+    exports : export list ref;
+    funcs : (func * string * local_names) Lib.Promise.t list ref;
+    func_ptrs : int32 FunEnv.t ref;
+    end_of_table : int32 ref;
+    globals : (global Lib.Promise.t * string) list ref;
+    global_names : int32 NameEnv.t ref;
+    named_imports : int32 NameEnv.t ref;
+    built_in_funcs : lazy_function NameEnv.t ref;
+    static_strings : int32 StringEnv.t ref;
+    data_segments : string list ref; (* Passive data segments *)
+    object_pool : object_pool;
+      
+    (* Types accumulated in global typtbl (for candid subtype checks)
+       See Note [Candid subtype checks]
+    *)
+    typtbl_typs : Type.typ list ref;
+
+    (* Metadata *)
+    args : (bool * string) option ref;
+    service : (bool * string) option ref;
+    stable_types : (bool * string) option ref;
+    labs : LabSet.t ref; (* Used labels (fields and variants),
+                            collected for Motoko custom section 0 *)
+
+    (* Local fields (only valid/used inside a function) *)
+    (* Static *)
+    n_param : int32; (* Number of parameters (to calculate indices of locals) *)
+    return_arity : int; (* Number of return values (for type of Return) *)
+
+    (* Mutable *)
+    locals : value_type list ref; (* Types of locals *)
+    local_names : (int32 * string) list ref; (* Names of locals *)
+
+    features : FeatureSet.t ref; (* Wasm features using wasmtime naming *)
+
+    (* requires stable memory (and emulation on wasm targets) *)
+    requires_stable_memory : bool ref;
+
+    (* Type descriptor of current program version, created on `conclude_module`. *)
+    global_type_descriptor : type_descriptor option ref;
+
+    (* Counter for deriving a unique id per constant function. *)
+    constant_functions : int32 ref;
+  }
+
+  (* Compile-time-known value, either a plain vanilla constant or a shared object. *)
+  type shared_value = 
+  | Vanilla of int64
+  | SharedObject of int64 (* index in object pool *)
+
+  (* The initial global environment *)
+  let mk_global mode rts trap_with : t = {
+    mode;
+    rts;
+    trap_with;
+    func_types = ref [];
+    func_imports = ref [];
+    other_imports = ref [];
+    exports = ref [];
+    funcs = ref [];
+    func_ptrs = ref FunEnv.empty;
+    end_of_table = ref 0l;
+    globals = ref [];
+    global_names = ref NameEnv.empty;
+    named_imports = ref NameEnv.empty;
+    built_in_funcs = ref NameEnv.empty;
+    static_strings = ref StringEnv.empty;
+    data_segments = ref [];
+    object_pool = { objects = ref []; frozen = ref false };
+    typtbl_typs = ref [];
+    (* Metadata *)
+    args = ref None;
+    service = ref None;
+    stable_types = ref None;
+    labs = ref LabSet.empty;
+    (* Actually unused outside mk_fun_env: *)
+    n_param = 0l;
+    return_arity = 0;
+    locals = ref [];
+    local_names = ref [];
+    features = ref FeatureSet.empty;
+    requires_stable_memory = ref false;
+    global_type_descriptor = ref None;
+    constant_functions = ref 0l;
+  }
+
+  (* This wraps Mo_types.Hash.hash to also record which labels we have seen,
+      so that that data can be put in a custom section, useful for debugging.
+      Thus Mo_types.Hash.hash should not be called directly!
+   *)
+  let hash (env : t) lab =
+    env.labs := LabSet.add lab (!(env.labs));
+    Wasm.I64_convert.extend_i32_u (Mo_types.Hash.hash lab)
+
+  let get_labs env = LabSet.elements (!(env.labs))
+
+  let mk_fun_env env n_param return_arity =
+    { env with
+      n_param;
+      return_arity;
+      locals = ref [];
+      local_names = ref [];
+    }
+
+  (* We avoid accessing the fields of t directly from outside of E, so here are a
+     bunch of accessors. *)
+
+  let mode (env : t) = env.mode
+
+  let add_anon_local (env : t) ty =
+    let i = reg env.locals ty in
+    Wasm.I32.add env.n_param i
+
+  let add_local_name (env : t) li name =
+    let _ = reg env.local_names (li, name) in ()
+
+  let get_locals (env : t) = !(env.locals)
+  let get_local_names (env : t) : (int32 * string) list = !(env.local_names)
+
+  let _add_other_import (env : t) m =
+    ignore (reg env.other_imports m)
+
+  let add_export (env : t) e =
+    ignore (reg env.exports e)
+
+  let add_global (env : t) name g =
+    assert (not (NameEnv.mem name !(env.global_names)));
+    let gi = reg env.globals (g, name) in
+    env.global_names := NameEnv.add name gi !(env.global_names)
+
+  let add_global64_delayed (env : t) name mut : int64 -> unit =
+    let p = Lib.Promise.make () in
+    add_global env name p;
+    (fun init ->
+      Lib.Promise.fulfill p (nr {
+        gtype = GlobalType (I64Type, mut);
+        value = nr (G.to_instr_list (G.i (Const (nr (Wasm_exts.Values.I64 init)))))
+      })
+    )
+  let add_global64 (env : t) name mut init =
+    add_global64_delayed env name mut init
+
+  let get_global (env : t) name : int32 =
+    match NameEnv.find_opt name !(env.global_names) with
+    | Some gi -> gi
+    | None -> raise (Invalid_argument (Printf.sprintf "No global named %s declared" name))
+
+  let get_global64_lazy (env : t) name mut init : int32 =
+    match NameEnv.find_opt name !(env.global_names) with
+    | Some gi -> gi
+    | None -> add_global64 env name mut init; get_global env name
+
+  let export_global env name =
+    add_export env (nr {
+      name = Lib.Utf8.decode name;
+      edesc = nr (GlobalExport (nr (get_global env name)))
+    })
+
+  let get_globals (env : t) = List.map (fun (g,n) -> Lib.Promise.value g) !(env.globals)
+
+  let reserve_fun (env : t) name =
+    let (j, fill) = reserve_promise env.funcs name in
+    let n = Int32.of_int (List.length !(env.func_imports)) in
+    let fi = Int32.add j n in
+    let fill_ (f, local_names) = fill (f, name, local_names) in
+    (fi, fill_)
+
+  let add_fun (env : t) name (f, local_names) =
+    let (fi, fill) = reserve_fun env name in
+    fill (f, local_names);
+    fi
+
+  let make_lazy_function env name : lazy_function =
+    Lib.AllocOnUse.make (fun () -> reserve_fun env name)
+
+  let get_constant_function_id (env : t) : int32 =
+    let id = !(env.constant_functions) in
+    env.constant_functions := (Int32.add id 1l);
+    id
+
+  let lookup_built_in (env : t) name : lazy_function =
+    match NameEnv.find_opt name !(env.built_in_funcs) with
+    | None ->
+      let lf = make_lazy_function env name in
+      env.built_in_funcs := NameEnv.add name lf !(env.built_in_funcs);
+      lf
+    | Some lf -> lf
+
+  let built_in (env : t) name : int32 =
+    Lib.AllocOnUse.use (lookup_built_in env name)
+
+  let define_built_in (env : t) name mk_fun : unit =
+    Lib.AllocOnUse.def  (lookup_built_in env name) mk_fun
+
+  let get_return_arity (env : t) = env.return_arity
+
+  let get_func_imports (env : t) = !(env.func_imports)
+  let get_other_imports (env : t) = !(env.other_imports)
+  let get_exports (env : t) = !(env.exports)
+  let get_funcs (env : t) = List.map Lib.Promise.value !(env.funcs)
+
+  let func_type (env : t) ty =
+    let rec go i = function
+      | [] -> env.func_types := !(env.func_types) @ [ ty ]; Int32.of_int i
+      | ty'::tys when ty = ty' -> Int32.of_int i
+      | _ :: tys -> go (i+1) tys
+       in
+    go 0 !(env.func_types)
+
+  let get_types (env : t) = !(env.func_types)
+
+  let add_func_import (env : t) modname funcname arg_tys ret_tys =
+    if !(env.funcs) <> [] then
+      raise (CodegenError "Add all imports before all functions!");
+
+    let i = {
+      module_name = Lib.Utf8.decode modname;
+      item_name = Lib.Utf8.decode funcname;
+      idesc = nr (FuncImport (nr (func_type env (FuncType (arg_tys, ret_tys)))))
+    } in
+    let fi = reg env.func_imports (nr i) in
+    let name = modname ^ "." ^ funcname in
+    assert (not (NameEnv.mem name !(env.named_imports)));
+    env.named_imports := NameEnv.add name fi !(env.named_imports)
+
+  let call_import (env : t) modname funcname =
+    let name = modname ^ "." ^ funcname in
+    match NameEnv.find_opt name !(env.named_imports) with
+      | Some fi -> G.i (Call (nr fi))
+      | _ ->
+        raise (Invalid_argument (Printf.sprintf "Function import not declared: %s\n" name))
+
+  let reuse_import (env : t) modname funcname =
+    let name = modname ^ "." ^ funcname in
+    match NameEnv.find_opt name !(env.named_imports) with
+      | Some fi -> fi
+      | _ ->
+        raise (Invalid_argument (Printf.sprintf "Function import not declared: %s\n" name))
+
+  let get_rts (env : t) = env.rts
+
+  let as_block_type env : stack_type -> block_type = function
+    | [] -> ValBlockType None
+    | [t] -> ValBlockType (Some t)
+    | ts -> VarBlockType (nr (func_type env (FuncType ([], ts))))
+
+
+  let prepare_branch_condition =
+    G.i (Convert (Wasm_exts.Values.I32 I32Op.WrapI64))
+  let if0 then_block else_block =
+    prepare_branch_condition ^^
+    G.if0 then_block else_block
+  let if1 return_type then_block else_block =
+    prepare_branch_condition ^^
+    G.if1 return_type then_block else_block
+
+  let if_ env tys thn els = prepare_branch_condition ^^ G.if_ (as_block_type env tys) thn els
+  let block_ env tys bdy = G.block_ (as_block_type env tys) bdy
+
+
+  let trap_with env msg = env.trap_with env msg
+  let then_trap_with env msg = if0 (trap_with env msg) G.nop
+  let else_trap_with env msg = if0 G.nop (trap_with env msg)
+
+  let add_data_segment (env : t) data : int32 =
+    let index = List.length !(env.data_segments) in
+    env.data_segments := !(env.data_segments) @ [ data ];
+    Int32.of_int index
+
+  let add_fun_ptr (env : t) fi : int32 =
+    match FunEnv.find_opt fi !(env.func_ptrs) with
+    | Some fp -> fp
+    | None ->
+      let fp = !(env.end_of_table) in
+      env.func_ptrs := FunEnv.add fi fp !(env.func_ptrs);
+      env.end_of_table := Int32.add !(env.end_of_table) 1l;
+      fp
+
+  let get_elems env =
+    FunEnv.bindings !(env.func_ptrs)
+
+  let get_end_of_table env : int32 =
+    !(env.end_of_table)
+
+  let add_static (env : t) (data : StaticBytes.t) : int32 =
+    let b = StaticBytes.as_bytes data in
+    match StringEnv.find_opt b !(env.static_strings)  with
+    | Some segment_index -> segment_index
+    | None ->
+      let segment_index = add_data_segment env b  in
+      env.static_strings := StringEnv.add b segment_index !(env.static_strings);
+      segment_index
+
+  let replace_data_segment (env : t) (segment_index : int32) (data : StaticBytes.t) : int64 =
+    let new_value = StaticBytes.as_bytes data in
+    let segment_index = Int32.to_int segment_index in
+    assert (segment_index < List.length !(env.data_segments));
+    env.data_segments := List.mapi (fun index old_value -> 
+      if index = segment_index then
+        (assert (old_value = "");
+        new_value)
+      else 
+        old_value
+      ) !(env.data_segments);
+    Int64.of_int (String.length new_value)
+
+  let get_data_segments (env : t) =
+    !(env.data_segments)
+
+  let object_pool_add (env : t) (allocation : t -> G.t) : int64 =
+    if !(env.object_pool.frozen) then raise (Invalid_argument "Object pool frozen");
+    let index = List.length !(env.object_pool.objects) in
+    env.object_pool.objects := !(env.object_pool.objects) @ [ allocation ];
+    Int64.of_int index
+
+  let object_pool_size (env : t) : int =
+    List.length !(env.object_pool.objects)
+
+  let iterate_object_pool (env : t) f =
+    G.concat_mapi f !(env.object_pool.objects)
+
+  let collect_garbage env force =
+    let name = "incremental_gc" in
+    let gc_fn = if force || !Flags.force_gc then name else "schedule_" ^ name in
+    call_import env "rts" gc_fn
+
+  (* See Note [Candid subtype checks] *)
+  (* NB: we don't bother detecting duplicate registrations here because the code sharing machinery
+     ensures that `add_typtbl_typ t` is called at most once for any `t` with a distinct type hash *)
+  let add_typtbl_typ (env : t) ty : Int32.t =
+    reg env.typtbl_typs ty
+
+  let get_typtbl_typs (env : t) : Type.typ list =
+    !(env.typtbl_typs)
+
+  let add_feature (env : t) f =
+    env.features := FeatureSet.add f (!(env.features))
+
+  let get_features (env : t) = FeatureSet.elements (!(env.features))
+
+  let require_stable_memory (env : t)  =
+    if not !(env.requires_stable_memory)
+    then
+      (env.requires_stable_memory := true;
+       match mode env with
+       | Flags.ICMode | Flags.RefMode ->
+          ()
+       | Flags.WASIMode | Flags.WasmMode ->
+          add_feature env "bulk-memory";
+          add_feature env "multi-memory")
+
+  let requires_stable_memory (env : t) =
+    !(env.requires_stable_memory)
+
+  let get_memories (env : t) initial_memory_pages =
+    nr {mtype = MemoryType ({min = initial_memory_pages; max = None}, I64IndexType)}
+    ::
+    match mode env with
+    | Flags.WASIMode | Flags.WasmMode when !(env.requires_stable_memory) ->
+      [ nr {mtype = MemoryType ({min = Int64.zero; max = None}, I64IndexType)} ]
+    | _ -> []
+end
+
+
+(* General code generation functions:
+   Rule of thumb: Here goes stuff that is independent of the Motoko AST.
+*)
+
+(* Functions called compile_* return a list of instructions (and maybe other stuff) *)
+
+
+(* Wasm comparisons and tests push an i32 flag; these wrappers widen it
+   immediately to i64 to match the compiler's uniform 64-bit stack
+   representation. *)
+let compile_comparison rel = 
+  G.i (Compare (Wasm_exts.Values.I64 rel)) ^^
+  G.i (Convert (Wasm_exts.Values.I64 I64Op.ExtendUI32))
+let compile_comparison_32 rel = 
+  G.i (Compare (Wasm_exts.Values.I32 rel)) ^^
+  G.i (Convert (Wasm_exts.Values.I64 I64Op.ExtendUI32))
+let compile_test op =
+  G.i (Test (Wasm_exts.Values.I64 op)) ^^
+  G.i (Convert (Wasm_exts.Values.I64 I64Op.ExtendUI32))
+let compile_comparison_f64 rel = 
+  G.i (Compare (Wasm_exts.Values.F64 rel)) ^^
+  G.i (Convert (Wasm_exts.Values.I64 I64Op.ExtendUI32))
+
+(* Push a constant (i64 resp. i32). *)
+let compile_unboxed_const i = G.i (Const (nr (Wasm_exts.Values.I64 i)))
+let compile_const_32 i = G.i (Const (nr (Wasm_exts.Values.I32 i)))
+let compile_unboxed_zero = compile_unboxed_const 0L
+let compile_unboxed_one = compile_unboxed_const 1L
+
+(* Some common arithmetic, used for pointer and index arithmetic *)
+(* `compile_op_const op i` applies binary i64 `op` to the stack value
+   with constant right operand `i`. *)
+let compile_op_const op i =
+    compile_unboxed_const i ^^
+    G.i (Binary (Wasm_exts.Values.I64 op))
+let compile_add_const = compile_op_const I64Op.Add
+let compile_sub_const = compile_op_const I64Op.Sub
+let compile_mul_const = compile_op_const I64Op.Mul
+let compile_divU_const = compile_op_const I64Op.DivU
+let compile_shrU_const = compile_op_const I64Op.ShrU
+let compile_shrS_const = compile_op_const I64Op.ShrS
+let compile_shl_const = compile_op_const I64Op.Shl
+let compile_rotl_const = compile_op_const I64Op.Rotl
+let compile_rotr_const = compile_op_const I64Op.Rotr
+let compile_bitand_const = compile_op_const I64Op.And
+(* Or/Xor with 0 are identities and compile to nothing. *)
+let compile_bitor_const = function
+  | 0L -> G.nop | n -> compile_op_const I64Op.Or n
+let compile_xor_const = function
+  | 0L -> G.nop | n -> compile_op_const I64Op.Xor n
+let compile_rel_const rel i =
+  compile_unboxed_const i ^^
+  compile_comparison rel
+(* Equality against 0 uses the cheaper Eqz test instruction. *)
+let compile_eq_const = function
+  | 0L -> compile_test I64Op.Eqz
+  | i -> compile_rel_const I64Op.Eq i
+
+(* 32-bit counterparts of the constant-operand helpers above; used where
+   the Wasm/RTS interface is genuinely 32-bit. *)
+let compile_op32_const op i =
+    compile_const_32 i ^^
+    G.i (Binary (Wasm_exts.Values.I32 op))
+let compile_add32_const = compile_op32_const I32Op.Add
+let _compile_sub32_const = compile_op32_const I32Op.Sub
+let _compile_mul32_const = compile_op32_const I32Op.Mul
+let _compile_divU32_const = compile_op32_const I32Op.DivU
+let _compile_shrU32_const = function
+  | 0l -> G.nop | n -> compile_op32_const I32Op.ShrU n
+let _compile_shrS32_const = function
+  | 0l -> G.nop | n -> compile_op32_const I32Op.ShrS n
+let _compile_shl32_const = function
+  | 0l -> G.nop | n -> compile_op32_const I32Op.Shl n
+let compile_eq32_const i =
+  compile_const_32 i ^^
+  compile_comparison_32 I32Op.Eq
+
+(* Analogous to Lib.Uint32.compare *)
+(* Unsigned comparison of two int64s (interpreted as uint64): a negative
+   OCaml int64 has its top bit set, so it is the larger unsigned value. *)
+let compare_uint64 i1 i2 =
+  match i1 < 0L, i2 < 0L with
+  | true, false -> 1
+  | false, true -> -1
+  | _ -> Int64.compare i1 i2
+
+(* A common variant of todo *)
+
+(* Report an unimplemented case via `todo` and emit a runtime trap as the
+   fallback code (plain, and a variant paired with SR.Unreachable). *)
+let todo_trap env fn se = todo fn se (E.trap_with env ("TODO: " ^ fn))
+let _todo_trap_SR env fn se = todo fn se (SR.Unreachable, E.trap_with env ("TODO: " ^ fn))
+
+(* Locals *)
+
+(* Allocate a fresh anonymous Wasm local of type `t`, registering `name`
+   for the name section; returns (setter, getter, index). *)
+let new_local_ env t name =
+  let i = E.add_anon_local env t in
+  E.add_local_name env i name;
+  ( G.i (LocalSet (nr i))
+  , G.i (LocalGet (nr i))
+  , i
+  )
+
+(* Fresh i64 local; returns just (setter, getter). *)
+let new_local env name =
+  let (set_i, get_i, _) = new_local_ env I64Type name
+  in (set_i, get_i)
+
+(* Fresh i32 local; returns just (setter, getter). *)
+let new_local32 env name =
+  let (set_i, get_i, _) = new_local_ env I32Type name
+  in (set_i, get_i)
+
+(* Some common code macros *)
+
+(* Iterates while cond is true. *)
+(* `cond` pushes the loop condition; `body` must be stack-neutral. The
+   Br target 1 jumps back to the enclosing loop header. *)
+let compile_while env cond body =
+    G.loop0 (
+      cond ^^ E.if0 (body ^^ G.i (Br (nr 1l))) G.nop
+    )
+
+(* Expects a number n on the stack. Iterates from m to below that number. *)
+(* `mk_body` receives a getter for the i64 loop counter. *)
+let from_m_to_n env m mk_body =
+    let (set_n, get_n) = new_local env "n" in
+    let (set_i, get_i) = new_local env "i" in
+    set_n ^^
+    compile_unboxed_const m ^^
+    set_i ^^
+
+    compile_while env
+      ( get_i ^^
+        get_n ^^
+        compile_comparison I64Op.LtU
+      ) (
+        mk_body get_i ^^
+
+        get_i ^^
+        compile_add_const 1L ^^
+        set_i
+      )
+
+(* Expects a number on the stack. Iterates from zero to below that number. *)
+let from_0_to_n env mk_body = from_m_to_n env 0L mk_body
+
+(* Pointer reference and dereference  *)
+
+(* Load/store a word through a raw (unskewed) 64-bit address. *)
+let load_unskewed_ptr : G.t =
+  G.i (Load {ty = I64Type; align = 3; offset = 0L; sz = None})
+
+let store_unskewed_ptr : G.t =
+  G.i (Store {ty = I64Type; align = 3; offset = 0L; sz = None})
+  
+(* Load/store through a skewed pointer; the unskew correction is folded
+   into the instruction's constant offset. *)
+let load_ptr : G.t =
+  G.i (Load {ty = I64Type; align = 3; offset = ptr_unskew; sz = None})
+
+let store_ptr : G.t =
+  G.i (Store {ty = I64Type; align = 3; offset = ptr_unskew; sz = None})
+
+(* Checked narrowing of an i64 (via getter `get_value`) to i32;
+   traps when the value does not fit in 32 bits. *)
+let narrow_to_32 env get_value =
+  get_value ^^
+  compile_unboxed_const 0xffff_ffffL ^^
+  compile_comparison I64Op.LeU ^^
+  E.else_trap_with env "cannot narrow to 32 bit" ^^ (* Note: If narrow fails during print, the trap print leads to an infinite recursion and a stack overflow *)
+  get_value ^^
+  G.i (Convert (Wasm_exts.Values.I32 I32Op.WrapI64))
+
+module FakeMultiVal = struct
+  (* For some use-cases (e.g. processing the compiler output with analysis
+     tools) it is useful to avoid the multi-value extension.
+
+     This module provides mostly transparent wrappers that put multiple values
+     in statically allocated globals and pull them off again.
+
+     So far only does I64Type (but that could be changed).
+
+     If the multi_value flag is on, these do not do anything.
+  *)
+  (* Result-type list as seen by Wasm: unchanged with multi-value or for
+     at most one result, otherwise empty (values travel via globals). *)
+  let ty tys =
+    if !Flags.multi_value || List.length tys <= 1
+    then tys
+    else []
+
+  (* The i-th scratch global (created on first use). *)
+  let global env i =
+    E.get_global64_lazy env (Printf.sprintf "multi_val_%d" i) Mutable 0L
+
+  (* Pop the results into globals 0..n-1; the stack top (last result)
+     lands in global 0. *)
+  let store env tys =
+    if !Flags.multi_value || List.length tys <= 1 then G.nop else
+    G.concat_mapi (fun i ty ->
+      assert(ty = I64Type);
+      G.i (GlobalSet (nr (global env i)))
+    ) tys
+
+  (* Push the results back, reading globals in reverse (n-1..0) so the
+     original stack order is restored. *)
+  let load env tys =
+    if !Flags.multi_value || List.length tys <= 1 then G.nop else
+    let n = List.length tys - 1 in
+    G.concat_mapi (fun i ty ->
+      assert(ty = I64Type);
+      G.i (GlobalGet (nr (global env (n - i))))
+    ) tys
+
+  (* A drop-in replacement for E.if_ *)
+  let if_ env bt thn els =
+    E.if_ env (ty bt) (thn ^^ store env bt) (els ^^ store env bt) ^^
+    load env bt
+
+  (* A block that can be exited from *)
+  let block_ env bt body =
+    E.block_ env (ty bt) (G.with_current_depth (fun depth ->
+      body (store env bt ^^ G.branch_to_ depth)
+    )) ^^
+    load env bt
+
+end (* FakeMultiVal *)
+
+module Func = struct
+  (* This module contains basic bookkeeping functionality to define functions,
+     in particular creating the environment, and finally adding it to the environment.
+  *)
+
+  (* Build a Wasm function value from parameter/result types and a body
+     generator; returns the function together with its local-name table. *)
+  let of_body env params retty mk_body =
+    let env1 = E.mk_fun_env env (Int32.of_int (List.length params)) (List.length retty) in
+    List.iteri (fun i (n, _t) -> E.add_local_name env1 (Int32.of_int i) n) params;
+    let ty = FuncType (List.map snd params, FakeMultiVal.ty retty) in
+    let body = G.to_instr_list (
+      mk_body env1 ^^ FakeMultiVal.store env1 retty
+    ) in
+    (nr { ftype = nr (E.func_type env ty);
+          locals = E.get_locals env1;
+          body }
+    , E.get_local_names env1)
+
+  (* Register `name` as a built-in whose body is elaborated lazily. *)
+  let define_built_in env name params retty mk_body =
+    E.define_built_in env name (lazy (of_body env params retty mk_body))
+
+  type sharing =
+    Always (* i.e. never inline *)
+  | Never  (* i.e. always inline *)
+
+  (* (Almost) transparently lift code into a function and call this function,
+     unless sharing = Never and not (!Flags.share_code) in which case the code
+     is inlined.
+     NB: inlined code must not be recursive nor `return`.
+  *)
+  (* Also add a hack to support multiple return values *)
+  let share_code sharing env name params retty mk_body =
+    if sharing = Always || !Flags.share_code
+    then
+      (* Shared: define the body (once) as a built-in whose arguments are
+         Wasm locals, and emit a call to it. *)
+      let getters =
+        List.mapi
+          (fun i _ -> G.i (LocalGet (nr (Int32.of_int i))))
+          params
+      in
+      define_built_in env name params retty (fun env -> mk_body env getters);
+      G.i (Call (nr (E.built_in env name))) ^^
+      FakeMultiVal.load env retty
+    else begin
+      assert (sharing = Never);
+      (* Inlined: spill stack arguments into fresh locals. The fold_right
+         sets the last parameter first, since it is on top of the stack. *)
+      let locals =
+        List.map
+           (fun (n, t) -> new_local_ env t n)
+           params
+      in
+      let set_locals = List.fold_right (fun (set, _, _) is -> is ^^ set) locals G.nop in
+      let getters = List.map (fun (_, get, _) -> get) locals in
+      set_locals ^^
+      mk_body env getters ^^ FakeMultiVal.store env retty ^^
+      FakeMultiVal.load env retty
+   end
+
+  (* Shorthands for various arities *)
+  let [@warning "-8"] share_code0 sharing env name retty mk_body =
+    share_code sharing env name [] retty (fun env [] -> mk_body env)
+  let [@warning "-8"] share_code1 sharing env name p1 retty mk_body =
+    share_code sharing env name [p1] retty (fun env [g1] -> mk_body env
+        g1
+    )
+  let [@warning "-8"] share_code2 sharing env name (p1,p2) retty mk_body =
+    share_code sharing env name [p1; p2] retty (fun env [g1; g2] -> mk_body env
+      g1
+      g2
+    )
+  let [@warning "-8"] share_code3 sharing env name (p1, p2, p3) retty mk_body =
+    share_code sharing env name [p1; p2; p3] retty (fun env [g1; g2; g3] -> mk_body env
+      g1
+      g2
+      g3
+    )
+  let [@warning "-8"] _share_code4 sharing env name (p1, p2, p3, p4) retty mk_body =
+    share_code sharing env name [p1; p2; p3; p4] retty (fun env [g1; g2; g3; g4]-> mk_body env
+      g1
+      g2
+      g3
+      g4
+    )
+  let [@warning "-8"] _share_code6 sharing env name (p1, p2, p3, p4, p5, p6) retty mk_body =
+    share_code sharing env name [p1; p2; p3; p4; p5; p6] retty (fun env [g1; g2; g3; g4; g5; g6] -> mk_body env
+      g1
+      g2
+      g3
+      g4
+      g5
+      g6
+    )
+  let [@warning "-8"] _share_code7 sharing env name (p1, p2, p3, p4, p5, p6, p7) retty mk_body =
+    share_code sharing env name [p1; p2; p3; p4; p5; p6; p7] retty (fun env [g1; g2; g3; g4; g5; g6; g7] -> mk_body env
+      g1
+      g2
+      g3
+      g4
+      g5
+      g6
+      g7
+    )
+
+  let [@warning "-8"] _share_code9 sharing env name (p1, p2, p3, p4, p5, p6, p7, p8, p9) retty mk_body =
+    share_code sharing env name [p1; p2; p3; p4; p5; p6; p7; p8; p9] retty (fun env [g1; g2; g3; g4; g5; g6; g7; g8; g9] -> mk_body env
+      g1
+      g2
+      g3
+      g4
+      g5
+      g6
+      g7
+      g8
+      g9
+    )
+
+
+end (* Func *)
+
+module RTS = struct
+  (* The connection to the C and Rust parts of the RTS *)
+  (* Declares every import the generated module takes from the runtime
+     system. NOTE(review): import registration order presumably fixes
+     import indices — confirm before reordering entries. *)
+  let system_imports env =
+    (* Garbage collector and write/allocation barriers *)
+    E.add_func_import env "rts" "initialize_incremental_gc" [] [];
+    E.add_func_import env "rts" "schedule_incremental_gc" [] [];
+    E.add_func_import env "rts" "incremental_gc" [] [];
+    E.add_func_import env "rts" "write_with_barrier" [I64Type; I64Type] [];
+    E.add_func_import env "rts" "allocation_barrier" [I64Type] [I64Type];
+    E.add_func_import env "rts" "running_gc" [] [I32Type];
+    (* Stable actor / static variable management *)
+    E.add_func_import env "rts" "register_stable_type" [I64Type; I64Type] [];
+    E.add_func_import env "rts" "load_stable_actor" [] [I64Type];
+    E.add_func_import env "rts" "save_stable_actor" [I64Type] [];
+    E.add_func_import env "rts" "free_stable_actor" [] [];
+    E.add_func_import env "rts" "contains_field" [I64Type; I64Type] [I32Type];
+    E.add_func_import env "rts" "initialize_static_variables" [I64Type] [];
+    E.add_func_import env "rts" "get_static_variable" [I64Type] [I64Type];
+    E.add_func_import env "rts" "set_static_variable" [I64Type; I64Type] [];
+    E.add_func_import env "rts" "set_upgrade_instructions" [I64Type] [];
+    E.add_func_import env "rts" "get_upgrade_instructions" [] [I64Type];
+    E.add_func_import env "rts" "memcpy" [I64Type; I64Type; I64Type] [I64Type]; (* standard libc memcpy *)
+    E.add_func_import env "rts" "memcmp" [I64Type; I64Type; I64Type] [I32Type];
+    E.add_func_import env "rts" "version" [] [I64Type];
+    (* Candid (IDL) parsing and subtype checking *)
+    E.add_func_import env "rts" "parse_idl_header" [I32Type; I64Type; I64Type; I64Type; I64Type] [];
+    E.add_func_import env "rts" "idl_alloc_typtbl" [I64Type; I64Type; I64Type; I64Type; I64Type] [];
+    E.add_func_import env "rts" "idl_sub_buf_words" [I64Type; I64Type] [I64Type];
+    E.add_func_import env "rts" "idl_sub_buf_init" [I64Type; I64Type; I64Type] [];
+    E.add_func_import env "rts" "idl_sub"
+      [I64Type; I64Type; I64Type; I64Type; I64Type; I64Type; I64Type; I32Type; I32Type] [I32Type];
+    E.add_func_import env "rts" "leb128_decode" [I64Type] [I64Type];
+    E.add_func_import env "rts" "sleb128_decode" [I64Type] [I64Type];
+    (* Arbitrary-precision integer (BigInt) support *)
+    E.add_func_import env "rts" "bigint_to_word32_wrap" [I64Type] [I32Type];
+    E.add_func_import env "rts" "bigint_of_word64" [I64Type] [I64Type];
+    E.add_func_import env "rts" "bigint_of_int64" [I64Type] [I64Type];
+    E.add_func_import env "rts" "bigint_of_float64" [F64Type] [I64Type];
+    E.add_func_import env "rts" "bigint_to_float64" [I64Type] [F64Type];
+    E.add_func_import env "rts" "bigint_to_word64_wrap" [I64Type] [I64Type];
+    E.add_func_import env "rts" "bigint_to_word64_trap" [I64Type] [I64Type];
+    E.add_func_import env "rts" "bigint_to_word64_trap_with" [I64Type; I64Type] [I64Type];
+    E.add_func_import env "rts" "bigint_eq" [I64Type; I64Type] [I32Type];
+    E.add_func_import env "rts" "bigint_isneg" [I64Type] [I32Type];
+    E.add_func_import env "rts" "bigint_count_bits" [I64Type] [I64Type];
+    E.add_func_import env "rts" "bigint_2complement_bits" [I64Type] [I64Type];
+    E.add_func_import env "rts" "bigint_lt" [I64Type; I64Type] [I32Type];
+    E.add_func_import env "rts" "bigint_gt" [I64Type; I64Type] [I32Type];
+    E.add_func_import env "rts" "bigint_le" [I64Type; I64Type] [I32Type];
+    E.add_func_import env "rts" "bigint_ge" [I64Type; I64Type] [I32Type];
+    E.add_func_import env "rts" "bigint_add" [I64Type; I64Type] [I64Type];
+    E.add_func_import env "rts" "bigint_sub" [I64Type; I64Type] [I64Type];
+    E.add_func_import env "rts" "bigint_mul" [I64Type; I64Type] [I64Type];
+    E.add_func_import env "rts" "bigint_rem" [I64Type; I64Type] [I64Type];
+    E.add_func_import env "rts" "bigint_div" [I64Type; I64Type] [I64Type];
+    E.add_func_import env "rts" "bigint_pow" [I64Type; I64Type] [I64Type];
+    E.add_func_import env "rts" "bigint_neg" [I64Type] [I64Type];
+    E.add_func_import env "rts" "bigint_lsh" [I64Type; I64Type] [I64Type];
+    E.add_func_import env "rts" "bigint_rsh" [I64Type; I64Type] [I64Type];
+    E.add_func_import env "rts" "bigint_abs" [I64Type] [I64Type];
+    E.add_func_import env "rts" "bigint_leb128_size" [I64Type] [I64Type];
+    E.add_func_import env "rts" "bigint_leb128_encode" [I64Type; I64Type] [];
+    E.add_func_import env "rts" "bigint_leb128_decode" [I64Type] [I64Type];
+    E.add_func_import env "rts" "bigint_leb128_decode_word64" [I64Type; I64Type; I64Type] [I64Type];
+    E.add_func_import env "rts" "bigint_sleb128_size" [I64Type] [I64Type];
+    E.add_func_import env "rts" "bigint_sleb128_encode" [I64Type; I64Type] [];
+    E.add_func_import env "rts" "bigint_sleb128_decode" [I64Type] [I64Type];
+    E.add_func_import env "rts" "bigint_sleb128_decode_word64" [I64Type; I64Type; I64Type] [I64Type];
+    E.add_func_import env "rts" "leb128_encode" [I64Type; I64Type] [];
+    E.add_func_import env "rts" "sleb128_encode" [I64Type; I64Type] [];
+    (* UTF-8 validation and serialization scanning helpers *)
+    E.add_func_import env "rts" "utf8_valid" [I64Type; I64Type] [I32Type];
+    E.add_func_import env "rts" "utf8_validate" [I64Type; I64Type] [];
+    E.add_func_import env "rts" "skip_leb128" [I64Type] [];
+    E.add_func_import env "rts" "skip_any" [I64Type; I64Type; I32Type; I32Type] [];
+    E.add_func_import env "rts" "find_field" [I64Type; I64Type; I64Type; I32Type; I64Type] [I32Type];
+    E.add_func_import env "rts" "skip_fields" [I64Type; I64Type; I64Type; I64Type] [];
+    (* Continuation (closure) table *)
+    E.add_func_import env "rts" "remember_continuation" [I64Type] [I64Type];
+    E.add_func_import env "rts" "recall_continuation" [I64Type] [I64Type];
+    E.add_func_import env "rts" "peek_future_continuation" [I64Type] [I64Type];
+    E.add_func_import env "rts" "continuation_count" [] [I64Type];
+    E.add_func_import env "rts" "continuation_table_size" [] [I64Type];
+    (* Text/blob operations *)
+    E.add_func_import env "rts" "blob_of_text" [I64Type] [I64Type];
+    E.add_func_import env "rts" "text_compare" [I64Type; I64Type] [I64Type];
+    E.add_func_import env "rts" "text_concat" [I64Type; I64Type] [I64Type];
+    E.add_func_import env "rts" "text_iter_done" [I64Type] [I64Type];
+    E.add_func_import env "rts" "text_iter" [I64Type] [I64Type];
+    E.add_func_import env "rts" "text_iter_next" [I64Type] [I32Type];
+    E.add_func_import env "rts" "text_len" [I64Type] [I64Type];
+    E.add_func_import env "rts" "text_of_ptr_size" [I64Type; I64Type] [I64Type];
+    E.add_func_import env "rts" "text_singleton" [I32Type] [I64Type];
+    E.add_func_import env "rts" "text_size" [I64Type] [I64Type];
+    E.add_func_import env "rts" "text_to_buf" [I64Type; I64Type] [];
+    E.add_func_import env "rts" "text_lowercase" [I64Type] [I64Type];
+    E.add_func_import env "rts" "text_uppercase" [I64Type] [I64Type];
+    (* Stable regions *)
+    E.add_func_import env "rts" "region_init" [I64Type] [];
+    E.add_func_import env "rts" "alloc_region" [I64Type; I64Type; I64Type] [I64Type];
+    E.add_func_import env "rts" "init_region" [I64Type; I64Type; I64Type; I64Type] [];
+    E.add_func_import env "rts" "region_new" [] [I64Type];
+    E.add_func_import env "rts" "region_id" [I64Type] [I64Type];
+    E.add_func_import env "rts" "region_page_count" [I64Type] [I64Type];
+    E.add_func_import env "rts" "region_vec_pages" [I64Type] [I64Type];
+    E.add_func_import env "rts" "region_size" [I64Type] [I64Type];
+    E.add_func_import env "rts" "region_grow" [I64Type; I64Type] [I64Type];
+    E.add_func_import env "rts" "region_load_blob" [I64Type; I64Type; I64Type] [I64Type];
+    E.add_func_import env "rts" "region_store_blob" [I64Type; I64Type; I64Type] [];
+    E.add_func_import env "rts" "region_load_word8" [I64Type; I64Type] [I32Type];
+    E.add_func_import env "rts" "region_store_word8" [I64Type; I64Type; I32Type] [];
+    E.add_func_import env "rts" "region_load_word16" [I64Type; I64Type] [I32Type];
+    E.add_func_import env "rts" "region_store_word16" [I64Type; I64Type; I32Type] [];
+    E.add_func_import env "rts" "region_load_word32" [I64Type; I64Type] [I32Type];
+    E.add_func_import env "rts" "region_store_word32" [I64Type; I64Type; I32Type] [];
+    E.add_func_import env "rts" "region_load_word64" [I64Type; I64Type] [I64Type];
+    E.add_func_import env "rts" "region_store_word64" [I64Type; I64Type; I64Type] [];
+    E.add_func_import env "rts" "region_load_float64" [I64Type; I64Type] [F64Type];
+    E.add_func_import env "rts" "region_store_float64" [I64Type; I64Type; F64Type] [];
+    E.add_func_import env "rts" "region0_get" [] [I64Type];
+    (* Principals and blob iteration *)
+    E.add_func_import env "rts" "blob_of_principal" [I64Type] [I64Type];
+    E.add_func_import env "rts" "principal_of_blob" [I64Type] [I64Type];
+    E.add_func_import env "rts" "compute_crc32" [I64Type] [I32Type];
+    E.add_func_import env "rts" "blob_iter_done" [I64Type] [I64Type];
+    E.add_func_import env "rts" "blob_iter" [I64Type] [I64Type];
+    E.add_func_import env "rts" "blob_iter_next" [I64Type] [I64Type];
+    (* Floating-point math (from musl) and formatting *)
+    E.add_func_import env "rts" "pow" [F64Type; F64Type] [F64Type]; (* musl *)
+    E.add_func_import env "rts" "sin" [F64Type] [F64Type]; (* musl *)
+    E.add_func_import env "rts" "cos" [F64Type] [F64Type]; (* musl *)
+    E.add_func_import env "rts" "tan" [F64Type] [F64Type]; (* musl *)
+    E.add_func_import env "rts" "asin" [F64Type] [F64Type]; (* musl *)
+    E.add_func_import env "rts" "acos" [F64Type] [F64Type]; (* musl *)
+    E.add_func_import env "rts" "atan" [F64Type] [F64Type]; (* musl *)
+    E.add_func_import env "rts" "atan2" [F64Type; F64Type] [F64Type]; (* musl *)
+    E.add_func_import env "rts" "exp" [F64Type] [F64Type]; (* musl *)
+    E.add_func_import env "rts" "log" [F64Type] [F64Type]; (* musl *)
+    E.add_func_import env "rts" "fmod" [F64Type; F64Type] [F64Type]; (* remainder, musl *)
+    E.add_func_import env "rts" "float_fmt" [F64Type; I64Type; I64Type] [I64Type];
+    (* Character classification/conversion *)
+    E.add_func_import env "rts" "char_to_upper" [I32Type] [I32Type];
+    E.add_func_import env "rts" "char_to_lower" [I32Type] [I32Type];
+    E.add_func_import env "rts" "char_is_whitespace" [I32Type] [I32Type];
+    E.add_func_import env "rts" "char_is_lowercase" [I32Type] [I32Type];
+    E.add_func_import env "rts" "char_is_uppercase" [I32Type] [I32Type];
+    E.add_func_import env "rts" "char_is_alphabetic" [I32Type] [I32Type];
+    (* Heap statistics and allocation *)
+    E.add_func_import env "rts" "get_max_live_size" [] [I64Type];
+    E.add_func_import env "rts" "get_reclaimed" [] [I64Type];
+    E.add_func_import env "rts" "alloc_words" [I64Type] [I64Type];
+    E.add_func_import env "rts" "get_total_allocations" [] [I64Type];
+    E.add_func_import env "rts" "get_heap_size" [] [I64Type];
+    E.add_func_import env "rts" "alloc_blob" [I64Type; I64Type] [I64Type];
+    E.add_func_import env "rts" "alloc_array" [I64Type; I64Type] [I64Type];
+    (* Persistence / graph-copy (de)stabilization *)
+    E.add_func_import env "rts" "read_persistence_version" [] [I64Type];
+    E.add_func_import env "rts" "stop_gc_before_stabilization" [] [];
+    E.add_func_import env "rts" "start_gc_after_destabilization" [] [];
+    E.add_func_import env "rts" "is_graph_stabilization_started" [] [I32Type];
+    E.add_func_import env "rts" "start_graph_stabilization" [I64Type; I64Type; I64Type] [];
+    E.add_func_import env "rts" "graph_stabilization_increment" [] [I32Type];
+    E.add_func_import env "rts" "start_graph_destabilization" [I64Type; I64Type] [];
+    E.add_func_import env "rts" "graph_destabilization_increment" [] [I32Type];
+    E.add_func_import env "rts" "get_graph_destabilized_actor" [] [I64Type];
+    E.add_func_import env "rts" "buffer_in_32_bit_range" [] [I64Type];
+    ()
+
+end (* RTS *)
+
+module GC = struct
+  (* Record mutator/gc instructions counts *)
+
+  (* Current value of IC performance counter 0 (the instruction counter). *)
+  let instruction_counter env =
+    compile_const_32 0l ^^
+    E.call_import env "ic0" "performance_counter"
+
+  let register_globals env =
+    E.add_global64 env "__mutator_instructions" Mutable 0L;
+    E.add_global64 env "__collector_instructions" Mutable 0L
+
+  let get_mutator_instructions env =
+    G.i (GlobalGet (nr (E.get_global env "__mutator_instructions")))
+  let set_mutator_instructions env =
+    G.i (GlobalSet (nr (E.get_global env "__mutator_instructions")))
+
+  let get_collector_instructions env =
+    G.i (GlobalGet (nr (E.get_global env "__collector_instructions")))
+  let set_collector_instructions env =
+    G.i (GlobalSet (nr (E.get_global env "__collector_instructions")))
+
+  (* Snapshot the instruction counter before GC (IC platforms only). *)
+  let record_mutator_instructions env =
+    match E.mode env with
+    | Flags.(ICMode | RefMode)  ->
+      instruction_counter env ^^
+      set_mutator_instructions env
+    | _ -> G.nop
+
+  (* After GC: collector instructions = counter now - mutator snapshot. *)
+  let record_collector_instructions env =
+    match E.mode env with
+    | Flags.(ICMode | RefMode)  ->
+      instruction_counter env ^^
+      get_mutator_instructions env ^^
+      G.i (Binary (Wasm_exts.Values.I64 I64Op.Sub)) ^^
+      set_collector_instructions env
+    | _ -> G.nop
+
+  (* Run a (scheduled) GC increment, bracketed by instruction accounting. *)
+  let collect_garbage env =
+    record_mutator_instructions env ^^
+    E.collect_garbage env false ^^
+    record_collector_instructions env
+
+end (* GC *)
+
+module Heap = struct
+  (* General heap object functionality (allocation, setting fields, reading fields) *)
+
+  (* Memory addresses are 64 bit (I64Type). *)
+  let word_size = 8L
+
+
+  (* The heap base global can only be used late, see conclude_module
+     and GC.register *)
+  let get_heap_base env =
+    G.i (GlobalGet (nr (E.get_global env "__heap_base")))
+
+  (* Total bytes ever allocated (RTS allocator statistic). *)
+  let get_total_allocation env =
+    E.call_import env "rts" "get_total_allocations"
+
+  (* Bytes reclaimed by the GC so far (RTS statistic). *)
+  let get_reclaimed env =
+    E.call_import env "rts" "get_reclaimed"
+
+  (* Current Wasm memory size in bytes (pages * page_size). *)
+  let get_memory_size =
+    G.i MemorySize ^^
+    compile_mul_const page_size
+
+  let get_max_live_size env =
+    E.call_import env "rts" "get_max_live_size"
+
+  (* Static allocation (always words)
+     (uses dynamic allocation for smaller and more readable code) *)
+  let alloc env (n : int64) : G.t =
+    compile_unboxed_const n ^^
+    E.call_import env "rts" "alloc_words"
+
+  (* Heap objects *)
+
+  (* At this level of abstraction, heap objects are just flat arrays of words *)
+
+  (* Load word `i` of the object whose (skewed) pointer is on the stack;
+     the unskew is folded into the load offset. *)
+  let load_field (i : int64) : G.t =
+    let offset = Int64.(add (mul word_size i) ptr_unskew) in
+    G.i (Load {ty = I64Type; align = 3; offset; sz = None})
+
+  (* Store a word into field `i` (expects pointer, then value). *)
+  let store_field (i : int64) : G.t =
+    let offset = Int64.(add (mul word_size i) ptr_unskew) in
+    G.i (Store {ty = I64Type; align = 3; offset; sz = None})
+
+  (* Or even as a single 64 bit float *)
+
+  let load_field_float64 (i : int64) : G.t =
+    let offset = Int64.(add (mul word_size i) ptr_unskew) in
+    G.i (Load {ty = F64Type; align = 3; offset; sz = None})
+
+  let store_field_float64 (i : int64) : G.t =
+    let offset = Int64.(add (mul word_size i) ptr_unskew) in
+    G.i (Store {ty = F64Type; align = 3; offset; sz = None})
+
+  (* Convenience functions related to memory *)
+  (* Copying bytes (works on unskewed memory addresses) *)
+  let memcpy env = E.call_import env "rts" "memcpy" ^^ G.i Drop
+  (* Comparing bytes (works on unskewed memory addresses) *)
+  let memcmp env = E.call_import env "rts" "memcmp" ^^ G.i (Convert (Wasm_exts.Values.I64 I64Op.ExtendUI32))
+
+  (* Define and export a `get_heap_base` accessor function. *)
+  let register env =
+    let get_heap_base_fn = E.add_fun env "get_heap_base" (Func.of_body env [] [I64Type] (fun env ->
+      get_heap_base env
+    )) in
+
+    E.add_export env (nr {
+      name = Lib.Utf8.decode "get_heap_base";
+      edesc = nr (FuncExport (nr get_heap_base_fn))
+    })
+
+  let get_heap_size env =
+    E.call_import env "rts" "get_heap_size"
+
+  (* Push the static variable at `index` (see RTS `get_static_variable`). *)
+  let get_static_variable env index = 
+    compile_unboxed_const index ^^
+    E.call_import env "rts" "get_static_variable"
+
+end (* Heap *)
+
+module Stack = struct
+  (* The RTS includes C code which requires a shadow stack in linear memory.
+     We reserve some space for it at the beginning of memory space (just like
+     wasm-l would), this way stack overflow would cause out-of-memory, and not
+     just overwrite static data.
+
+     We sometimes use the stack space if we need small amounts of scratch space.
+
+     All pointers here are unskewed.
+
+     (We report logical stack overflow as "RTS Stack underflow" as the stack
+     grows downwards.)
+  *)
+
+  (* Predefined constant stack size of 4MB, according to the persistent memory layout. *)
+  let stack_size = 4 * 1024 * 1024
+
+  let end_ () = Int64.of_int stack_size 
+
+  let register_globals env =
+    (* stack pointer *)
+    E.add_global64 env "__stack_pointer" Mutable (end_());
+    (* frame pointer *)
+    E.add_global64 env "__frame_pointer" Mutable (end_());
+    (* low watermark *)
+    if !Flags.measure_rts_stack then
+      E.add_global64 env "__stack_min" Mutable (end_());
+    E.export_global env "__stack_pointer"
+
+  let get_stack_ptr env =
+    G.i (GlobalGet (nr (E.get_global env "__stack_pointer")))
+  let set_stack_ptr env =
+    G.i (GlobalSet (nr (E.get_global env "__stack_pointer")))
+
+  let get_min env =
+    G.i (GlobalGet (nr (E.get_global env "__stack_min")))
+  let set_min env =
+    G.i (GlobalSet (nr (E.get_global env "__stack_min")))
+
+  let get_max_stack_size env =
+    if !Flags.measure_rts_stack then
+      compile_unboxed_const (end_()) ^^
+      get_min env ^^
+      G.i (Binary (Wasm_exts.Values.I64 I64Op.Sub))
+    else (* report max available *)
+      compile_unboxed_const (end_())
+
+  let update_stack_min env =
+    if !Flags.measure_rts_stack then
+    get_stack_ptr env ^^
+    get_min env ^^
+    compile_comparison I64Op.LtU ^^
+    (E.if0
+       (get_stack_ptr env ^^
+        set_min env)
+      G.nop)
+    else G.nop
+
+  let stack_overflow env =
+    Func.share_code0 Func.Never env "stack_overflow" [] (fun env ->
+      (* read last word of reserved page to force trap *)
+      compile_unboxed_const 0xFFFF_FFFF_FFFF_FFFCL ^^
+      G.i (Load {ty = I64Type; align = 3; offset = 0L; sz = None}) ^^
+      G.i Unreachable
+    )
+
+  let alloc_words env n =
+    let n_bytes = Int64.mul n Heap.word_size in
+    (* avoid absurd allocations *)
+    assert (Int64.(to_int n_bytes) < stack_size);
+    (* alloc words *)
+    get_stack_ptr env ^^
+    compile_unboxed_const n_bytes ^^
+    G.i (Binary (Wasm_exts.Values.I64 I64Op.Sub)) ^^
+    set_stack_ptr env ^^
+    update_stack_min env ^^
+    get_stack_ptr env ^^
+    (* check for stack overflow, if necessary *)
+    if n_bytes >= page_size then
+      get_stack_ptr env ^^
+      G.i (Unary (Wasm_exts.Values.I64 I64Op.Clz)) ^^
+      E.if0
+        G.nop (* we found leading zeros, i.e. no wraparound *)
+        (stack_overflow env)
+    else
+      G.nop
+
+  let free_words env n =
+    get_stack_ptr env ^^
+    compile_unboxed_const (Int64.mul n Heap.word_size) ^^
+    G.i (Binary (Wasm_exts.Values.I64 I64Op.Add)) ^^
+    set_stack_ptr env
+
+  (* TODO: why not just remember and reset the stack pointer, instead of calling free_words? Also below *)
+  let with_words env name n f =
+    let (set_x, get_x) = new_local env name in
+    alloc_words env n ^^ set_x ^^
+    f get_x ^^
+    free_words env n
+
+  let dynamic_alloc_words env get_n =
+    get_stack_ptr env ^^
+    compile_divU_const Heap.word_size ^^
+    get_n ^^
+    compile_comparison I64Op.LtU ^^
+    (E.if0
+      (stack_overflow env)
+      G.nop) ^^
+    get_stack_ptr env ^^
+    get_n ^^
+    compile_mul_const Heap.word_size ^^
+    G.i (Binary (Wasm_exts.Values.I64 I64Op.Sub)) ^^
+    set_stack_ptr env ^^
+    update_stack_min env ^^
+    get_stack_ptr env
+
+  let dynamic_free_words env get_n =
+    get_stack_ptr env ^^
+    get_n ^^
+    compile_mul_const Heap.word_size ^^
+    G.i (Binary (Wasm_exts.Values.I64 I64Op.Add)) ^^
+    set_stack_ptr env
+
+  (* TODO: why not just remember and reset the stack pointer, instead of calling free_words? Also above*)
+  let dynamic_with_words env name f =
+    let (set_n, get_n) = new_local env "n" in
+    let (set_x, get_x) = new_local env name in
+    set_n ^^
+    dynamic_alloc_words env get_n ^^ set_x ^^
+    f get_x ^^
+    dynamic_free_words env get_n
+
+  let dynamic_with_bytes env name f =
+    (* round up to nearest wordsize *)
+    compile_add_const (Int64.sub Heap.word_size 1L) ^^
+    compile_divU_const Heap.word_size ^^
+    dynamic_with_words env name f
+
+  (* Stack Frames *)
+
+  (* Traditional frame pointer for accessing statically allocated locals/args (all words)
+     Used (sofar) only in serialization to compress Wasm stack
+     at cost of expanding Rust/C Stack (whose size we control)*)
+  let get_frame_ptr env =
+    G.i (GlobalGet (nr (E.get_global env "__frame_pointer")))
+  let set_frame_ptr env =
+    G.i (GlobalSet (nr (E.get_global env "__frame_pointer")))
+
+  (* Frame pointer operations *)
+
+  (* Enter/exit a new frame of `n` words, saving and restoring prev frame pointer *)
+  let with_frame env name n f =
+    (* reserve space for n words + saved frame_ptr *)
+    alloc_words env (Int64.add n 1L) ^^
+    (* store the current frame_ptr at offset 0 *)
+    get_frame_ptr env ^^
+    G.i (Store {ty = I64Type; align = 3; offset = 0L; sz = None}) ^^
+    get_stack_ptr env ^^
+    (* set_frame_ptr to stack_ptr *)
+    set_frame_ptr env ^^
+    (* do as f *)
+    f () ^^
+    (* assert frame_ptr == stack_ptr *)
+    get_frame_ptr env ^^
+    get_stack_ptr env ^^
+    compile_comparison I64Op.Eq ^^
+    E.else_trap_with env "frame_ptr <> stack_ptr" ^^
+    (* restore the saved frame_ptr *)
+    get_frame_ptr env ^^
+    G.i (Load {ty = I64Type; align = 3; offset = 0L; sz = None}) ^^
+    set_frame_ptr env ^^
+    (* free the frame *)
+    free_words env (Int64.add n 1L)
+
+  (* read local n of current frame
+     (locals start at word offset 1; offset 0 holds the saved frame pointer) *)
+  let get_local env n =
+    let offset = Int64.mul (Int64.add n 1L) Heap.word_size in
+    get_frame_ptr env ^^
+      G.i (Load { ty = I64Type; align = 3; offset; sz = None})
+
+  (* read local n of previous frame
+     (follows the saved frame pointer stored at offset 0 of the current frame,
+     then indexes that frame's locals exactly like get_local) *)
+  let get_prev_local env n =
+    let offset = Int64.mul (Int64.add n 1L) Heap.word_size in
+    (* indirect through saved frame_ptr at offset 0 *)
+    get_frame_ptr env ^^
+    G.i (Load { ty = I64Type; align = 3; offset = 0L; sz = None}) ^^
+    G.i (Load { ty = I64Type; align = 3; offset; sz = None})
+
+  (* set local n of current frame
+     Pops the value to store from the Wasm stack and writes it at word
+     offset n+1 of the current frame (offset 0 is the saved frame pointer).
+     The store is wrapped in a shared helper function, one per index `n`.
+     Fix: the helper name used to be built as "set_local %i" ^ …, leaving a
+     stray printf-style "%i" in the emitted function name; the specifier was
+     never substituted because the name is built by string concatenation. *)
+  let set_local env n =
+    let offset = Int64.mul (Int64.add n 1L) Heap.word_size in
+    Func.share_code1 Func.Never env ("set_local " ^ Int64.to_string n) ("val", I64Type) []
+      (fun env get_val ->
+         get_frame_ptr env ^^
+         get_val ^^
+         G.i (Store { ty = I64Type; align = 3; offset; sz = None}))
+
+end (* Stack *)
+
+
+module ContinuationTable = struct
+  (* Thin wrappers around the RTS continuation table used by async code.
+     See rts/motoko-rts/src/closure_table.rs for the exact contracts
+     (argument/result conventions live on the RTS side). *)
+  let remember env : G.t = E.call_import env "rts" "remember_continuation"
+  let recall env : G.t = E.call_import env "rts" "recall_continuation"
+  let peek_future env : G.t = E.call_import env "rts" "peek_future_continuation"
+  let count env : G.t = E.call_import env "rts" "continuation_count"
+  let size env : G.t = E.call_import env "rts" "continuation_table_size"
+end (* ContinuationTable *)
+
+module Bool = struct
+  (* Boolean literals are either 0 or non-zero (e.g. if they originate from the RTS or an external API).
+     They need not be shifted before being put in the heap,
+     because the "zero page" never contains GC-ed objects.
+  *)
+
+  (* Vanilla (uniform) representation of a boolean constant. *)
+  let vanilla_lit = function
+    | false -> 0L
+    | true -> 1L (* or any other non-zero value *)
+
+  (* Push a boolean constant in i64 stack representation. *)
+  let lit b = compile_unboxed_const (vanilla_lit b)
+
+  (* Push a boolean constant as i32, for RTS functions taking 32-bit bools. *)
+  let lit_rts_int32 b = compile_const_32 (Int64.to_int32 (vanilla_lit b))
+
+  (* Logical negation: maps non-zero to 0 and zero to 1. *)
+  let neg = compile_test I64Op.Eqz
+
+  (* Zero-extend an i32 boolean (from the RTS) to the i64 representation. *)
+  let from_rts_int32 =
+    G.i (Convert (Wasm_exts.Values.I64 I64Op.ExtendUI32))
+
+  (* Wrap an i64 boolean down to i32 for passing to the RTS. *)
+  let to_rts_int32 =
+    G.i (Convert (Wasm_exts.Values.I32 I32Op.WrapI64))
+
+  (* Normalize an arbitrary i64 to a canonical boolean: x <> 0. *)
+  let from_int64 =
+    compile_unboxed_const 0L ^^
+    compile_comparison I64Op.Ne
+
+end (* Bool *)
+
+module BitTagged = struct
+
+  (* This module takes care of pointer tagging:
+
+     A pointer to an object at offset `i` on the heap is represented as
+     `i-1`, so the low two bits of the pointer are always set (0b…11).
+     We call `i-1` a *skewed* pointer, in a feeble attempt to avoid the term
+     shifted, which may sound like a logical shift.
+
+     We use the constants ptr_skew and ptr_unskew to change a pointer as a
+     signpost where we switch between raw pointers to skewed ones.
+
+     This means we can store a small unboxed scalar x as (x `lsl` 1), and still
+     tell it apart from a pointer by looking at the last bits: if set, it is a
+     pointer.
+
+     Small here means:
+
+     * 0 ≤ x < 2^(ubits ty) for an unsigned type ty with (ubits ty) payload bits
+     * -2^sbits ≤ x < 2^sbits, for a signed type ty with (sbits ty) (= (ubits ty) - 1) payload bits
+       (i.e. excluding sign bit),
+     with the exception that compact Nat is regarded as signed to support subtyping.
+
+     Tagging needs to happen with a
+     * shift left by (64 - ubits ty) for a signed or unsigned type ty; then
+     * a logical or of the (variable length) tag bits for ty.
+
+     Untagging needs to happen with an
+     * logical right shift (for unsigned type ty in Nat{8,16,32,64}, Char).
+     * _arithmetic_ right shift (for signed type ty Int{8,16,32,64}, Int but also Nat).
+       This is the right thing to do for signed numbers.
+       Nat is treated as signed to allow coercion-free subtyping.
+
+     The low bits 64 - (ubits ty) store the tag bits of the value.
+
+     Boolean false is a non-pointer by construction.
+     Boolean true (1) needs not be shifted as GC will not consider it.
+
+     Summary:
+
+       0b…11: A pointer
+       0b…x0: A shifted scalar
+       0b000: `false`
+       0b001: `true`
+
+     Note that {Nat,Int}{8,16,32} and compact {Int,Nat}64 and compact Int, Nat are explicitly tagged.
+     The bits are stored in the _most_ significant bits of the `i64`,
+     with the lower bits storing the variable length tag.
+
+     {Int,Nat}64 are stored in signed and unsigned forms.
+
+     Compact {Int,Nat} are (both) stored in signed form to support coercion free subtyping of Nat < Int.
+     That means that one bit, the highest bit, of the compact Nat representation is unused and the
+     representable range for both compact Int and Nat values is -2^(sbits Int) ≤ x < 2^(sbits Int).
+
+     This describes the vanilla representation of small and compact scalars,
+     used as the uniform representation of values and when stored in heap structures.
+
+     See module TaggedSmallWord.
+
+     The stack representation of a small scalars, UnboxedWord64 {Int,Nat}{8,16,32},
+     on the other hand, always has all tag bits cleared, with the payload in the high bits of the word.
+
+     The stack representation of compact or unboxed scalars or UnboxedWord64 {Int,Nat}64, 
+     on the other hand, is the natural (unpadded) machine representation.
+
+     All arithmetic is implemented directly on the stack (not vanilla) representation of scalars.
+     Proper tags bits are removed/added when loading from vanilla or storing to vanilla representation.
+
+  *)
+  (* Test for the `true` literal (vanilla value 1). *)
+  let is_true_literal env =
+    compile_eq_const 1L
+
+  (* Note: `true` is not handled here, needs specific check where needed. *)
+  let if_tagged_scalar env retty is1 is2 =
+    compile_bitand_const 0x1L ^^
+    E.if_ env retty is2 is1
+
+  (* With two bit-tagged pointers on the stack, decide
+     whether both are scalars and invoke is1 (the fast path)
+     if so, and otherwise is2 (the slow path).
+     Note: `true` is not handled here, needs specific check where needed.
+  *)
+  let if_both_tagged_scalar env retty is1 is2 =
+    G.i (Binary (Wasm_exts.Values.I64 I64Op.Or)) ^^
+    compile_bitand_const 0x1L ^^
+    E.if_ env retty is2 is1
+
+  (* Number of payload bits of an unsigned compact representation. *)
+  let ubits_of pty = TaggingScheme.ubits_of pty
+
+  (* Number of payload bits excluding the sign bit. *)
+  let sbits_of pty = (ubits_of pty) - 1
+
+  (* 64 bit numbers *)
+
+  (* static *)
+  (* Can constant `n` be represented as a tagged compact scalar of `pty`? *)
+  let can_tag_const pty (n : int64) = Type.(
+    match pty with
+    | Nat | Int | Int64 | Int32 ->
+      let sbits = sbits_of pty in
+      let lower_bound = Int64.(neg (shift_left 1L sbits)) in
+      let upper_bound = Int64.shift_left 1L sbits in
+      lower_bound <= n && n < upper_bound
+    | Nat64 | Nat32 ->
+      let ubits = ubits_of pty in
+      let upper_bound = Int64.shift_left 1L ubits in
+      0L <= n && n < upper_bound
+    | _ -> assert false)
+
+  (* Compute the vanilla (tagged) representation of constant `i`.
+     Precondition: can_tag_const pty i. *)
+  let tag_const pty i = Type.(
+    match pty with
+    |  Nat | Int | Int64 | Int32
+    |  Nat64 | Nat32 ->
+      Int64.shift_left i (64 - ubits_of pty)
+      (* tag *)
+      |> Int64.logor (TaggingScheme.tag_of_typ pty)
+    | _ -> assert false)
+
+  (* dynamic *)
+  (* Sanity mode only: re-derive the "can tag" decision with explicit range
+     checks and trap if it disagrees with the fast-path result on the stack. *)
+  let sanity_check_can_tag_signed env pty get_x =
+    if TaggingScheme.debug || !Flags.sanity then
+      get_x ^^
+      Func.share_code2 Func.Always env (prim_fun_name pty "check_can_tag_i64") (("res", I64Type), ("x", I64Type)) [I64Type]
+        (fun env get_res get_x -> Type.(
+          match pty with
+          | Nat | Int | Int64 | Int32 ->
+            let sbits = sbits_of pty in
+            let lower_bound = Int64.(neg (shift_left 1L sbits)) in
+            let upper_bound = Int64.shift_left 1L sbits in
+            (* lower_bound <= x < upper_bound *)
+            compile_unboxed_const lower_bound ^^
+            get_x ^^
+            compile_comparison I64Op.LeS ^^
+            get_x ^^ compile_unboxed_const upper_bound ^^
+            compile_comparison I64Op.LtS ^^
+            G.i (Binary (Wasm_exts.Values.I64 I64Op.And))
+          | Nat64 | Nat32 ->
+            let ubits = ubits_of pty in
+            let upper_bound = Int64.shift_left 1L ubits in
+            (* 0 <= x < upper_bound *)
+            get_x ^^ compile_unboxed_const upper_bound ^^
+            compile_comparison I64Op.LtU
+          | _ ->
+            assert false) ^^
+         get_res ^^
+         compile_comparison I64Op.Eq ^^
+         E.else_trap_with env (prim_fun_name pty "check_can_tag_i64") ^^
+         get_res)
+    else
+      G.nop
+
+  (* Branch to is1 if the signed i64 on the stack fits the compact
+     representation of pty, else is2 (the value itself is consumed). *)
+  let if_can_tag_signed env pty retty is1 is2 = Type.(
+    match pty with
+    | Nat | Int | Int64 ->
+      Func.share_code1 Func.Never env
+        (prim_fun_name pty "if_can_tag_i64") ("x", I64Type) [I64Type] (fun env get_x ->
+        (* checks that all but the low sbits are either all 0 or all 1 *)
+        get_x ^^
+        get_x ^^ compile_shrS_const (Int64.of_int ((64 - sbits_of pty))) ^^
+        (* fixed: was `I32Op.Xor` inside an I64 instruction (only well-typed
+           because I32Op and I64Op alias the same IntOp constructors) *)
+        G.i (Binary (Wasm_exts.Values.I64 I64Op.Xor)) ^^
+        compile_shrU_const (Int64.of_int (sbits_of pty)) ^^
+        compile_test I64Op.Eqz ^^
+        sanity_check_can_tag_signed env pty get_x) ^^
+      E.if_ env retty is1 is2
+    | Nat64 ->
+      Func.share_code1 Func.Never env
+         (prim_fun_name pty "if_can_tag_i64") ("x", I64Type) [I64Type] (fun env get_x ->
+          (* checks that all but the low ubits are 0 *)
+          get_x ^^ compile_shrU_const (Int64.of_int (ubits_of pty)) ^^
+          compile_test I64Op.Eqz ^^
+          sanity_check_can_tag_signed env pty get_x) ^^
+      E.if_ env retty is1 is2
+    | _ -> assert false)
+
+  (* As above, but the value on the stack is interpreted as unsigned. *)
+  let if_can_tag_unsigned env pty retty is1 is2 = Type.(
+    match pty with
+    |  Nat | Int | Int64 ->
+      let sbitsL = Int64.of_int (sbits_of pty) in
+      compile_shrU_const sbitsL ^^
+      compile_test I64Op.Eqz ^^
+      E.if_ env retty is1 is2
+    | Nat64 ->
+      let ubitsL = Int64.of_int (ubits_of pty) in
+      compile_shrU_const ubitsL ^^
+      E.if_ env retty is2 is1 (* NB: swapped branches *)
+    | _ -> assert false)
+
+  (* Tag the scalar on the stack: shift payload into the high bits and or in
+     the type's tag bits. Precondition: the value fits (see if_can_tag_*). *)
+  let tag env pty = (* TBR *)
+    let ubitsl = Int64.of_int (ubits_of pty) in
+    compile_shl_const (Int64.sub 64L ubitsl) ^^
+    (* tag *)
+    compile_bitor_const (TaggingScheme.tag_of_typ pty)
+
+  (* Sanity mode only: trap unless the vanilla value on the stack carries
+     the expected tag bits for `ty`; leaves the value on the stack. *)
+  let sanity_check_tag line env ty =
+    if TaggingScheme.debug || !(Flags.sanity) then
+      let name =
+        (prim_fun_name ty "sanity_check_tag") ^
+          (if TaggingScheme.debug then Int.to_string line else "")
+      in
+      let tag_mask = Int64.(sub (shift_left 1L (64 - TaggingScheme.ubits_of ty)) one) in
+      (Func.share_code1 Func.Always env name ("v", I64Type) [I64Type] (fun env get_n ->
+         get_n ^^
+         compile_bitand_const tag_mask ^^
+         compile_eq_const (TaggingScheme.tag_of_typ ty) ^^
+         E.else_trap_with env "unexpected tag" ^^
+         get_n))
+    else G.nop
+
+  (* Remove the tag, shifting the payload back down: arithmetic shift for
+     signed types (and Nat/Int, see module comment), logical for unsigned. *)
+  let untag line env pty = Type.(match pty with
+    | Nat | Int | Int64 | Int32 | Int16 | Int8 ->
+      let ubitsl = Int64.of_int (ubits_of pty) in
+      sanity_check_tag line env pty ^^
+      compile_shrS_const (Int64.sub 64L ubitsl)
+    | Nat64 | Nat32 | Nat16 | Nat8 ->
+      let ubitsl = Int64.of_int (ubits_of pty) in
+      sanity_check_tag line env pty ^^
+      compile_shrU_const (Int64.sub 64L ubitsl)
+    | _ -> assert false)
+
+  (* Clear the tag bits in place (payload stays in the high bits). *)
+  let clear_tag env pty =
+    if TaggingScheme.tag_of_typ pty <> 0L then
+      let shift_amount = 64 - ubits_of pty in
+      let mask = Int64.(lognot (sub (shift_left one shift_amount) one)) in
+      compile_bitand_const mask
+    else G.nop
+
+end (* BitTagged *)
+
+module Tagged = struct
+  (* Tagged objects all have an object header consisting of a tag and a forwarding pointer.
+     The tag is to describe their runtime type and serves to traverse the heap
+     (serialization, GC), but also for objectification of arrays.
+
+     The tag is a word at the beginning of the object.
+
+     The (skewed) forwarding pointer supports object moving in the incremental garbage collection.
+
+         obj header
+     ┌──────┬─────────┬──
+     │ tag  │ fwd ptr │ ...
+     └──────┴─────────┴──
+
+     Attention: This mapping is duplicated in these places
+       * here
+       * motoko-rts/src/types.rs
+       * motoko-rts/src/text.rs
+       * motoko-rts/src/memory.rs
+       * motoko-rts/src/bigint.rs
+       * motoko-rts/src/blob-iter.rs
+       * motoko-rts/src/static-checks.rs
+       * In all GC implementations in motoko-rts/src/gc/
+     so update all!
+   *)
+
+  type bits_sort =
+    | U (* signed *)
+    | S (* unsigned *)
+    | F (* float *)
+  type array_sort =
+    | I (* [ T ] *)
+    | M (* [var T ] *)
+    | T (* (T,+) *)
+    | S (* shared ... -> ... *)
+  type blob_sort =
+    | B (* Blob *)
+    | T (* Text *)
+    | P (* Principal *)
+    | A (* actor { ... } *)
+
+  type [@warning "-37"] tag  =
+    | Object
+    | Array of array_sort (* Also a tuple *)
+    | Bits64 of bits_sort (* Contains a 64 bit number *)
+    | MutBox (* used for mutable heap-allocated variables *)
+    | Closure
+    | Some (* For opt *)
+    | Variant
+    | Blob of blob_sort
+    | Indirection (* Only used by the GC *)
+    | BigInt
+    | Concat (* String concatenation, used by rts/text.c *)
+    | OneWordFiller (* Only used by the RTS *)
+    | FreeSpace (* Only used by the RTS *)
+    | Region
+    | ArraySliceMinimum (* Used by the GC for incremental array marking *)
+    | StableSeen (* Marker that we have seen this thing before *)
+    | CoercionFailure (* Used in the Candid decoder. Static singleton! *)
+
+  (* Tags needs to have the lowest bit set, to allow distinguishing object
+     headers from heap locations (object or field addresses).
+
+     (Reminder: objects and fields are word-aligned so will have the lowest two
+     bits unset) *)
+  let int_of_tag = function
+    | Object -> 1L
+    | Array I -> 3L
+    | Array M -> 5L
+    | Array T -> 7L
+    | Array S -> 9L
+    | Bits64 U -> 11L
+    | Bits64 S -> 13L
+    | Bits64 F -> 15L
+    | MutBox -> 17L
+    | Closure -> 19L
+    | Some -> 21L
+    | Variant -> 23L
+    | Blob B -> 25L
+    | Blob T -> 27L
+    | Blob P -> 29L
+    | Blob A -> 31L
+    | Indirection -> 33L
+    | BigInt -> 35L
+    | Concat -> 37L
+    | Region -> 39L
+    | OneWordFiller -> 41L
+    | FreeSpace -> 43L
+    (* NOTE(review): 44 is even, so it violates the lowest-bit rule above —
+       presumably it acts as a threshold/sentinel for the GC's incremental
+       array marking rather than as an ordinary tag; confirm against the RTS *)
+    | ArraySliceMinimum -> 44L
+    (* Next two tags won't be seen by the GC, so no need to set the lowest bit
+       for `CoercionFailure` and `StableSeen` *)
+    | CoercionFailure -> 0xffff_ffff_ffff_fffeL
+    | StableSeen -> 0xffff_ffff_ffff_ffffL
+
+  (*
+     The null pointer is the sentinel `0xffff_ffff_ffff_fffbL` (skewed representation).
+
+     This serves for efficient null tests by using direct pointer comparison.
+     The null pointer must not be dereferenced.
+     Null tests are possible without resolving the forwarding pointer of a non-null comparand.
+  *)
+
+  let null_vanilla_pointer = 0xffff_ffff_ffff_fffbL (* skewed, pointing to last unallocated Wasm page *)
+  let null_pointer = compile_unboxed_const null_vanilla_pointer
+
+  (* Test the value on the stack for pointer-equality with the null sentinel. *)
+  let is_null env = compile_eq_const null_vanilla_pointer
+
+  let not_null env =
+    (* null test works without forwarding pointer resolution of a non-null comparand *)
+    null_pointer ^^
+    compile_comparison I64Op.Ne
+
+  (* Object header: tag word + forwarding-pointer word. *)
+  let header_size = 2L
+
+  (* The tag *)
+  let tag_field = 0L
+  let forwarding_pointer_field = 1L
+
+  (* Note: post-allocation barrier must be applied after initialization *)
+  (* Allocate `size` words (including the header) and initialize the header:
+     tag set, forwarding pointer initially self-referential. *)
+  let alloc env size tag =
+    assert (size > 1L);
+    let name = Printf.sprintf "alloc_size<%d>_tag<%d>" (Int64.to_int size) (Int64.to_int (int_of_tag tag)) in
+
+    Func.share_code0 Func.Never env name [I64Type] (fun env ->
+      let set_object, get_object = new_local env "new_object" in
+      Heap.alloc env size ^^
+      set_object ^^ get_object ^^
+      compile_unboxed_const (int_of_tag tag) ^^
+      Heap.store_field tag_field ^^
+      get_object ^^ (* object pointer *)
+      get_object ^^ (* forwarding pointer *)
+      Heap.store_field forwarding_pointer_field ^^
+      get_object
+    )
+
+  let load_forwarding_pointer env =
+    Heap.load_field forwarding_pointer_field
+
+  (* Overwrite the tag of the (forwarded) object on the stack. *)
+  let store_tag env tag =
+    load_forwarding_pointer env ^^
+    compile_unboxed_const (int_of_tag tag) ^^
+    Heap.store_field tag_field
+
+  let load_tag env =
+    load_forwarding_pointer env ^^
+    Heap.load_field tag_field
+
+  (* Sanity mode only: trap unless the object on the stack has `tag`;
+     leaves the object pointer on the stack. *)
+  let sanity_check_tag line env tag =
+    let tag = int_of_tag tag in
+    let name = "sanity_check_tag_" ^ Int64.to_string tag ^
+                 (if TaggingScheme.debug then Int.to_string line else "")
+    in
+    if TaggingScheme.debug || !Flags.sanity then
+      Func.share_code1 Func.Always env name ("obj", I64Type) [I64Type]
+        (fun env get_obj ->
+         get_obj ^^
+         load_tag env  ^^
+         compile_unboxed_const tag ^^
+         compile_comparison I64Op.Eq ^^
+         E.else_trap_with env name ^^
+         get_obj)
+    else
+      G.nop
+
+  (* Trap unless the object's forwarding pointer points to itself, i.e. the
+     object has not been moved; accepts skewed or unskewed pointers. *)
+  let check_forwarding env unskewed =
+    let name = "check_forwarding_" ^ if unskewed then "unskewed" else "skewed" in
+    Func.share_code1 Func.Always env name ("object", I64Type) [I64Type] (fun env get_object ->
+      let set_object = G.setter_for get_object in
+      (if unskewed then
+        get_object ^^
+        compile_unboxed_const ptr_skew ^^
+        G.i (Binary (Wasm_exts.Values.I64 I64Op.Add)) ^^
+        set_object
+      else G.nop) ^^
+      get_object ^^
+      load_forwarding_pointer env ^^
+      get_object ^^
+      compile_comparison I64Op.Eq ^^
+      E.else_trap_with env "missing object forwarding" ^^
+      get_object ^^
+      (if unskewed then
+        compile_unboxed_const ptr_unskew ^^
+        G.i (Binary (Wasm_exts.Values.I64 I64Op.Add))
+      else G.nop))
+
+  (* Variant of check_forwarding for stores: the value to be written sits on
+     top of the object pointer, so stash it, check, and restore. *)
+  let check_forwarding_for_store env typ =
+    let (set_value, get_value, _) = new_local_ env typ "value" in
+    set_value ^^ check_forwarding env false ^^ get_value
+
+  let load_field env index =
+    (if !Flags.sanity then check_forwarding env false else G.nop) ^^
+    Heap.load_field index
+
+  let store_field env index =
+    (if !Flags.sanity then check_forwarding_for_store env I64Type else G.nop) ^^
+    Heap.store_field index
+
+  let load_field_float64 env index =
+    (if !Flags.sanity then check_forwarding env false else G.nop) ^^
+    Heap.load_field_float64 index
+
+  let store_field_float64 env index =
+    (if !Flags.sanity then check_forwarding_for_store env F64Type else G.nop) ^^
+    Heap.store_field_float64 index
+
+  (* Branches based on the tag of the object pointed to,
+     leaving the object on the stack afterwards. *)
+  let branch_default env retty def (cases : (tag * G.t) list) : G.t =
+    let (set_tag, get_tag) = new_local env "tag" in
+
+    (* linear chain of tag comparisons; falls through to `def` *)
+    let rec go = function
+      | [] -> def
+      | ((tag, code) :: cases) ->
+        get_tag ^^
+        compile_eq_const (int_of_tag tag) ^^
+        E.if_ env retty code (go cases)
+    in
+    load_tag env ^^
+    set_tag ^^
+    go cases
+
+  let allocation_barrier env =
+    E.call_import env "rts" "allocation_barrier"
+
+  (* Store a value at a heap location, routing through the RTS write barrier
+     only while the GC is running (cheap check first). *)
+  let write_with_barrier env =
+    let (set_value, get_value) = new_local env "written_value" in
+    let (set_location, get_location) = new_local env "write_location" in
+    set_value ^^ set_location ^^
+    (* performance gain by first checking the GC state *)
+    E.call_import env "rts" "running_gc" ^^
+    Bool.from_rts_int32 ^^
+    E.if0 (
+      get_location ^^ get_value ^^
+      E.call_import env "rts" "write_with_barrier"
+    ) (
+      get_location ^^ get_value ^^
+      store_unskewed_ptr
+    )
+
+  (* Allocate and fully initialize an object: header plus one field per
+     element instruction, then apply the allocation barrier. *)
+  let obj env tag element_instructions : G.t =
+    let n = List.length element_instructions in
+    let size = (Int64.add (Wasm.I64.of_int_u n) header_size) in
+    let (set_object, get_object) = new_local env "new_object" in
+    alloc env size tag ^^
+    set_object ^^
+    let init_elem idx instrs : G.t =
+      get_object ^^
+      instrs ^^
+      Heap.store_field (Int64.add (Wasm.I64.of_int_u idx) header_size)
+    in
+    G.concat_mapi init_elem element_instructions ^^
+    get_object ^^
+    allocation_barrier env
+
+  (* Register a lazily-allocated shared (pooled) object; returns its handle. *)
+  let shared_object env allocation =
+    let index = E.object_pool_add env allocation in
+    E.SharedObject index
+
+  (* Emit code pushing a shared value: either an immediate vanilla constant
+     or a load of the pooled static object. *)
+  let materialize_shared_value env = function
+  | E.Vanilla vanilla -> compile_unboxed_const vanilla
+  | E.SharedObject index -> Heap.get_static_variable env index
+
+  let share env allocation =
+    materialize_shared_value env (shared_object env allocation)
+
+end (* Tagged *)
+
+module MutBox = struct
+  (*
+      Mutable heap objects
+
+       ┌──────┬─────┬─────────┐
+       │ obj header │ payload │
+       └──────┴─────┴─────────┘
+
+     The object header includes the obj tag (MutBox) and the forwarding pointer.
+  *)
+
+  (* Word index of the payload, right after the header. *)
+  let field = Tagged.header_size
+
+  (* Allocate a fresh MutBox initialized to zero. *)
+  let alloc env =
+    Tagged.obj env Tagged.MutBox [ compile_unboxed_zero ]
+
+  (* Read the payload (resolving the forwarding pointer first). *)
+  let load_field env =
+    Tagged.load_forwarding_pointer env ^^
+    Tagged.load_field env field
+
+  (* Write the payload (resolving the forwarding pointer first). *)
+  let store_field env =
+    let (set_mutbox_value, get_mutbox_value) = new_local env "mutbox_value" in
+    set_mutbox_value ^^
+    Tagged.load_forwarding_pointer env ^^
+    get_mutbox_value ^^
+    Tagged.store_field env field
+
+  (* Register a MutBox in the static object pool; returns its pool index. *)
+  let add_global_mutbox env =
+    E.object_pool_add env alloc
+end
+
+
+module Opt = struct
+  (* The Option type. Optional values are represented as
+
+    1. The null literal being the sentinel null pointer value, see above.
+
+    2. ┌──────┬─────────┐
+       │ some │ payload │
+       └──────┴─────────┘
+
+       A heap-allocated box for `?v` values. Should only ever contain null or
+       another such box.
+
+    3. Anything else (pointer or unboxed scalar): Constituent value, implicitly
+       injected into the opt type.
+
+    This way, `?t` is represented without allocation, with the only exception of
+    the value `?ⁿnull` for n>0.
+
+    NB: `?ⁿnull` is essentially represented by the unary encoding of the number
+    of n. This could be optimized further, by storing `n` in the Some payload,
+    instead of a pointer, but unlikely worth it.
+
+  *)
+
+  let some_payload_field = Tagged.header_size
+
+  let null_vanilla_lit = Tagged.null_vanilla_pointer
+  let null_lit env = Tagged.null_pointer
+
+  let is_null = Tagged.is_null
+  let is_some = Tagged.not_null
+
+  (* Allocate a Some box around the given payload. *)
+  let alloc_some env get_payload =
+    Tagged.obj env Tagged.Some [ get_payload ]
+
+  (* `?e`: wraps only when necessary (null or an existing Some box);
+     scalars, `true`, and other heap values pass through unboxed. *)
+  let inject env e =
+    e ^^
+    Func.share_code1 Func.Never env "opt_inject" ("x", I64Type) [I64Type] (fun env get_x ->
+      get_x ^^ BitTagged.if_tagged_scalar env [I64Type]
+        ( get_x ) (* scalar, no wrapping *)
+        ( get_x ^^ BitTagged.is_true_literal env ^^ (* exclude true literal since `branch_default` follows the forwarding pointer *)
+          E.if_ env [I64Type]
+            ( get_x ) (* true literal, no wrapping *)
+            ( get_x ^^ is_some env ^^
+              E.if_ env [I64Type]
+                ( get_x ^^ Tagged.branch_default env [I64Type]
+                  ( get_x ) (* default tag, no wrapping *)
+                  [ Tagged.Some, alloc_some env get_x ]
+                )
+                ( alloc_some env get_x ) (* ?ⁿnull for n > 0 *)
+            )
+        )
+    )
+
+  (* Compile-time injection for shared (constant) values; mirrors `inject`. *)
+  let constant env = function
+  | E.Vanilla value when value = null_vanilla_lit -> Tagged.shared_object env (fun env -> alloc_some env (null_lit env)) (* ?ⁿnull for n > 0 *)
+  | E.Vanilla value -> E.Vanilla value (* not null and no `Opt` object *)
+  | shared_value ->
+    Tagged.shared_object env (fun env -> 
+      let materialized_value = Tagged.materialize_shared_value env shared_value in  
+      inject env materialized_value (* potentially wrap in new `Opt` *)
+    )
+
+  (* This function is used where conceptually, Opt.inject should be used, but
+  we know for sure that it wouldn’t do anything anyways, except dereferencing the forwarding pointer *)
+  let inject_simple env e =
+    e ^^ Tagged.load_forwarding_pointer env
+
+  let load_some_payload_field env =
+    Tagged.load_forwarding_pointer env ^^
+    Tagged.load_field env some_payload_field
+
+  (* Inverse of `inject`: unwraps a Some box; everything else (scalar,
+     `true`, other heap value) is already the payload. *)
+  let project env =
+    Func.share_code1 Func.Never env "opt_project" ("x", I64Type) [I64Type] (fun env get_x ->
+      get_x ^^ BitTagged.if_tagged_scalar env [I64Type]
+        ( get_x ) (* scalar, no wrapping *)
+        ( get_x ^^ BitTagged.is_true_literal env ^^ (* exclude true literal since `branch_default` follows the forwarding pointer *)
+          E.if_ env [I64Type]
+            ( get_x ) (* true literal, no wrapping *)
+            ( get_x ^^ Tagged.branch_default env [I64Type]
+              ( get_x ) (* default tag, no wrapping *)
+              [ Tagged.Some, get_x ^^ load_some_payload_field env ]
+            )
+        )
+    )
+
+end (* Opt *)
+
+module Variant = struct
+  (* The Variant type. We store the variant tag in a first word; we can later
+     optimize and squeeze it in the Tagged tag. We can also later support unboxing
+     variants with an argument of type ().
+
+       ┌──────┬─────┬────────────┬─────────┐
+       │ obj header │ varianttag │ payload │
+       └──────┴─────┴────────────┴─────────┘
+
+     The object header includes the obj tag (TAG_VARIANT) and the forwarding pointer.
+  *)
+
+  let variant_tag_field = Tagged.header_size
+  let payload_field = Int64.add variant_tag_field 1L
+
+  (* Label hash used as the runtime variant tag. *)
+  let hash_variant_label env : Mo_types.Type.lab -> int64 =
+    E.hash env
+
+  (* Allocate a variant object `#l e`. *)
+  let inject env l e =
+    Tagged.obj env Tagged.Variant [compile_unboxed_const (hash_variant_label env l); e]
+
+  let get_variant_tag env =
+    Tagged.load_forwarding_pointer env ^^
+    Tagged.load_field env variant_tag_field
+
+  (* Extract the payload of the variant on the stack. *)
+  let project env =
+    Tagged.load_forwarding_pointer env ^^
+    Tagged.load_field env payload_field
+
+  (* Test if the top of the stack points to a variant with this label *)
+  let test_is env l =
+    get_variant_tag env ^^
+    compile_eq_const (hash_variant_label env l)
+
+end (* Variant *)
+
+
+module Closure = struct
+  (* In this module, we deal with closures, i.e. functions that capture parts
+     of their environment.
+
+     The structure of a closure is:
+
+       ┌──────┬─────┬───────┬──────┬──────────────┐
+       │ obj header │ funid │ size │ captured ... │
+       └──────┴─────┴───────┴──────┴──────────────┘
+
+     The object header includes the object tag (TAG_CLOSURE) and the forwarding pointer.
+  *)
+  let header_size = Int64.add Tagged.header_size 2L
+
+  let funptr_field = Tagged.header_size
+  let len_field = Int64.add 1L Tagged.header_size
+
+  (* Read the i-th captured variable of the closure on the stack. *)
+  let load_data env i =
+    Tagged.load_forwarding_pointer env ^^
+    Tagged.load_field env (Int64.add header_size i)
+
+  (* Write the i-th captured variable of the closure on the stack. *)
+  let store_data env i =
+    let (set_closure_data, get_closure_data) = new_local env "closure_data" in
+    set_closure_data ^^
+    Tagged.load_forwarding_pointer env ^^
+    get_closure_data ^^
+    Tagged.store_field env (Int64.add header_size i)
+
+  let prepare_closure_call env =
+    Tagged.load_forwarding_pointer env
+
+  (* Expect on the stack
+     * the function closure (using prepare_closure_call)
+     * and arguments (n-ary!)
+     * the function closure again!
+  *)
+  let call_closure env n_args n_res =
+    (* Calculate the wasm type for a given calling convention.
+       An extra first argument for the closure! *)
+    let ty = E.func_type env (FuncType (
+      I64Type :: Lib.List.make n_args I64Type,
+      FakeMultiVal.ty (Lib.List.make n_res I64Type))) in
+    (* get the table index *)
+    Tagged.load_forwarding_pointer env ^^
+    Tagged.load_field env funptr_field ^^
+    G.i (Convert (Wasm_exts.Values.I32 I32Op.WrapI64)) ^^
+    (* All done: Call! *)
+    G.i (CallIndirect (nr ty)) ^^
+    FakeMultiVal.load env (Lib.List.make n_res I64Type)
+
+  (* Shared constant closure with an empty environment (size 0). *)
+  let constant env get_fi =
+    let fi = Wasm.I64_convert.extend_i32_u (E.add_fun_ptr env (get_fi ())) in
+    Tagged.shared_object env (fun env -> Tagged.obj env Tagged.Closure [
+      compile_unboxed_const fi;
+      compile_unboxed_const 0L
+    ])
+
+end (* Closure *)
+
+
+module BoxedWord64 = struct
+  (* We store large word64s, nat64s and int64s in immutable boxed 64bit heap objects.
+
+     Small values are stored unboxed, tagged, see BitTagged. The bit-tagging logic is
+     contained in BitTagged; here we just do the boxing.
+
+     The heap layout of a BoxedWord64 is:
+
+       ┌──────┬─────┬─────┐
+       │ obj header │ i64 │
+       └──────┴─────┴─────┘
+
+     The object header includes the object tag (Bits64) and the forwarding pointer.
+  *)
+
+  let payload_field = Tagged.header_size
+
+  (* Heap tag distinguishing signed/unsigned 64-bit boxes. *)
+  let heap_tag env pty =
+    match pty with
+    | Type.Nat64 -> Tagged.(Bits64 U)
+    | Type.Int64 -> Tagged.(Bits64 S)
+    | _ -> assert false
+
+  (* Allocate a box and store the element produced by `compile_elem`. *)
+  let compile_box env pty compile_elem : G.t =
+    let (set_i, get_i) = new_local env "boxed_i64" in
+    (* NOTE(review): header (2 words) + payload (1 word) = 3, but 4 words are
+       allocated — presumably for alignment or RTS layout requirements;
+       confirm against motoko-rts types.rs *)
+    let size = 4L in
+    Tagged.alloc env size (heap_tag env pty) ^^
+    set_i ^^
+    get_i ^^ compile_elem ^^ Tagged.store_field env payload_field ^^
+    get_i ^^
+    Tagged.allocation_barrier env
+
+  (* Compile-time boxing decision for a constant: compact tag if it fits,
+     otherwise a shared heap box. *)
+  let constant env pty i =
+    if BitTagged.can_tag_const pty i
+    then 
+      E.Vanilla (BitTagged.tag_const pty i)
+    else
+      Tagged.shared_object env (fun env -> compile_box env pty (compile_unboxed_const i))
+
+  (* Runtime boxing: tag in place when the value fits, else heap-allocate. *)
+  let box env pty = 
+    Func.share_code1 Func.Never env 
+      (prim_fun_name pty "box64") ("n", I64Type) [I64Type] (fun env get_n ->
+      get_n ^^ BitTagged.if_can_tag_signed env pty [I64Type]
+        (get_n ^^ BitTagged.tag env pty)
+        (compile_box env pty get_n)
+    )
+
+  (* Runtime unboxing: untag a scalar, or load the payload of a heap box. *)
+  let unbox env pty = 
+    Func.share_code1 Func.Never env 
+      (prim_fun_name pty "unbox64") ("n", I64Type) [I64Type] (fun env get_n ->
+      get_n ^^
+      BitTagged.if_tagged_scalar env [I64Type]
+        (get_n ^^ BitTagged.untag __LINE__ env pty)
+        (get_n ^^
+         Tagged.load_forwarding_pointer env ^^
+         Tagged.(sanity_check_tag __LINE__ env (heap_tag env pty)) ^^
+         Tagged.load_field env payload_field)
+    )
+end (* BoxedWord64 *)
+
+module Word64 = struct
+
+  let compile_add env = G.i (Binary (Wasm_exts.Values.I64 I64Op.Add))
+  let compile_signed_sub env = G.i (Binary (Wasm_exts.Values.I64 I64Op.Sub))
+  let compile_mul env = G.i (Binary (Wasm_exts.Values.I64 I64Op.Mul))
+  let compile_signed_div env = G.i (Binary (Wasm_exts.Values.I64 I64Op.DivS))
+  let compile_signed_mod env = G.i (Binary (Wasm_exts.Values.I64 I64Op.RemS))
+  let compile_unsigned_div env = G.i (Binary (Wasm_exts.Values.I64 I64Op.DivU))
+  let compile_unsigned_rem env = G.i (Binary (Wasm_exts.Values.I64 I64Op.RemU))
+  let compile_unsigned_sub env =
+    Func.share_code2 Func.Never env "nat_sub" (("n1", I64Type), ("n2", I64Type)) [I64Type] (fun env get_n1 get_n2 ->
+      get_n1 ^^ get_n2 ^^ compile_comparison I64Op.LtU ^^
+      E.then_trap_with env "Natural subtraction underflow" ^^
+      get_n1 ^^ get_n2 ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.Sub))
+    )
+
+  (* Wrapping Nat64 exponentiation via square-and-multiply.
+     NOTE(review): this body is textually identical to
+     TaggedSmallWord.compile_nat_power specialized to Nat64 (shift of 0);
+     consider sharing the implementation. *)
+  let compile_unsigned_pow env =
+    let name = prim_fun_name Type.Nat64 "wpow_nat" in
+    Func.share_code2 Func.Always env name (("n", I64Type), ("exp", I64Type)) [I64Type]
+      (fun env get_n get_exp ->
+        let set_n = G.setter_for get_n in
+        let set_exp = G.setter_for get_exp in
+        let (set_acc, get_acc) = new_local env "acc" in
+
+        (* start with result = 1 *)
+        compile_unboxed_const 1L ^^ set_acc ^^
+
+        (* handle exp == 0 *)
+        get_exp ^^ compile_test I64Op.Eqz ^^
+        E.if1 I64Type get_acc (* done *)
+        begin
+          G.loop0 begin
+            (* Are we done? *)
+            get_exp ^^ compile_unboxed_const 1L ^^ compile_comparison I64Op.LeU ^^
+            E.if0 G.nop (* done *)
+            begin
+              (* Check low bit of exp to see if we need to multiply *)
+              get_exp ^^ compile_shl_const 63L ^^ compile_test I64Op.Eqz ^^
+              E.if0 G.nop
+              begin
+                (* Multiply! *)
+                get_acc ^^ get_n ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.Mul)) ^^ set_acc
+              end ^^
+              (* Square n, and shift exponent *)
+              get_n ^^ get_n ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.Mul)) ^^ set_n ^^
+              get_exp ^^ compile_shrU_const 1L ^^ set_exp ^^
+              (* And loop *)
+              G.i (Br (nr 1l))
+            end
+          end ^^
+          (* Multiply a last time *)
+          get_acc ^^ get_n ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.Mul))
+        end
+      )
+
+
+  (* Wrapping Int64 power: traps on a negative exponent, otherwise delegates
+     to the unsigned wrapping exponentiation above. *)
+  let compile_signed_wpow env =
+    Func.share_code2 Func.Never env "wrap_pow_Int64" (("n", I64Type), ("exp", I64Type)) [I64Type]
+      (fun env get_n get_exp ->
+        get_exp ^^
+        compile_unboxed_const 0L ^^
+        compile_comparison I64Op.GeS ^^
+        E.else_trap_with env "negative power" ^^
+        get_n ^^ get_exp ^^ compile_unsigned_pow env
+      )
+
+  let _compile_eq env = compile_comparison I64Op.Eq
+  let compile_relop env i64op = compile_comparison i64op
+
+  (* Bit test: with [b] on TOS and the word below it, computes word & (1 << b). *)
+  let btst_kernel env =
+    let (set_b, get_b) = new_local env "b" in
+    set_b ^^ compile_unboxed_const 1L ^^ get_b ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.Shl)) ^^
+    G.i (Binary (Wasm_exts.Values.I64 I64Op.And))
+
+end (* BoxedWord64 *)
+
+module TaggedSmallWord = struct
+  (* While smaller-than-64bit words are treated as i64 from the WebAssembly
+     perspective, there are certain differences that are type based. This module
+     provides helpers to abstract over those.
+
+     Caution: Some functions here are also used for unboxed Nat64/Int64, while others
+     are _only_ used for the small ones. Check call-sites!
+  *)
+
+  (* The unsigned type of the same bit width. *)
+  let toNat = Type.(function
+    | Int8 | Nat8 -> Nat8
+    | Int16 | Nat16 -> Nat16
+    | Int32 | Nat32 -> Nat32
+    | Int64 | Nat64 -> Nat64
+    | _ -> assert false)
+
+  (* Payload width in bits for each small-word (and unboxed 64-bit) type. *)
+  let bits_of_type = Type.(function
+    | Int8 | Nat8 -> 8
+    | Int16 | Nat16 -> 16
+    | Char -> 21
+    | Nat32 | Int32 -> 32
+    (* unboxed on stack *)
+    | Nat64 | Int64 -> 64
+    | _  -> assert false)
+
+  (* Scalar tag bits for the type; 0 for the unboxed 64-bit types. *)
+  let tag_of_type pty = Type.(match pty with
+    | Int8 | Nat8
+    | Int16 | Nat16
+    | Int32 | Nat32
+    | Char ->
+      TaggingScheme.tag_of_typ pty
+    (* unboxed on stack *)
+    | Int64 | Nat64 -> 0L
+    | _ -> assert false)
+
+  (* Number of unused low bits when the payload lives in the MSBs of an i64. *)
+  let shift_of_type ty = Int64.of_int (64 - bits_of_type ty)
+
+  (* Mask selecting a valid shift/rotate amount (bits_of_type ty - 1). *)
+  let bitwidth_mask_of_type = function
+    | Type.(Int8|Nat8) -> 0b111L
+    | Type.(Int16|Nat16) -> 0b1111L
+    | Type.(Int32|Nat32) -> 0b11111L
+    | p -> todo "bitwidth_mask_of_type" (Arrange_type.prim p) 0L
+
+  (* Constant [n] in the shifted (MSB) representation of type [ty]. *)
+  let const_of_type ty n = Int64.(shift_left n (to_int (shift_of_type ty)))
+
+  (* All low (non-payload) bits set. *)
+  let padding_of_type ty = Int64.(sub (const_of_type ty 1L) one)
+
+  (* All payload (MSB) bits set. *)
+  let mask_of_type ty = Int64.lognot (padding_of_type ty)
+
+  (* Makes sure that we only shift/rotate the maximum number of bits available in the word. *)
+  let clamp_shift_amount = function
+    | Type.(Nat64|Int64) -> G.nop
+    | ty -> compile_bitand_const (bitwidth_mask_of_type ty)
+
+  let shift_leftWordNtoI64 = compile_shl_const
+
+  (* Makes sure that the word payload (e.g. shift/rotate amount) is in the LSB bits of the word. *)
+  let lsb_adjust = function
+    | Type.(Int64|Nat64) -> G.nop
+    | Type.(Nat8|Nat16|Nat32) as ty -> compile_shrU_const (shift_of_type ty)
+    | Type.(Int8|Int16|Int32) as ty -> compile_shrS_const (shift_of_type ty)
+    | Type.Char as ty -> compile_shrU_const (shift_of_type ty)
+    | _ -> assert false
+
+  (* Makes sure that the word payload (e.g. operation result) is in the MSB bits of the word. *)
+  let msb_adjust = function
+    | Type.(Int64|Nat64) -> G.nop
+    | ty -> shift_leftWordNtoI64 (shift_of_type ty)
+
+  (* Makes sure that the word representation invariant is restored. *)
+  let sanitize_word_result = function
+    | Type.(Nat64|Int64) -> G.nop
+    | ty -> compile_bitand_const (mask_of_type ty)
+
+  (* Sets the number (according to the type's word invariant) of LSBs. *)
+  let compile_word_padding = function
+    | Type.(Nat64|Int64) -> G.nop
+    | ty -> compile_bitor_const (padding_of_type ty)
+
+  (* Kernel for counting leading zeros, according to the word invariant. *)
+  let clz_kernel ty =
+    compile_word_padding ty ^^
+    G.i (Unary (Wasm_exts.Values.I64 I64Op.Clz)) ^^
+    msb_adjust ty
+
+  (* Kernel for counting trailing zeros, according to the word invariant. *)
+  let ctz_kernel ty =
+    compile_word_padding ty ^^
+    compile_rotr_const (shift_of_type ty) ^^
+    G.i (Unary (Wasm_exts.Values.I64 I64Op.Ctz)) ^^
+    msb_adjust ty
+
+  (* Kernel for testing a bit position, according to the word invariant. *)
+  let btst_kernel env ty =
+    let (set_b, get_b) = new_local env "b"
+    in lsb_adjust ty ^^ set_b ^^ lsb_adjust ty ^^
+       compile_unboxed_one ^^ get_b ^^ clamp_shift_amount ty ^^
+       G.i (Binary (Wasm_exts.Values.I64 I64Op.Shl)) ^^
+       G.i (Binary (Wasm_exts.Values.I64 I64Op.And)) ^^
+       msb_adjust ty
+
+  (* Code points occupy 21 bits, so can always be tagged scalars *)
+  let lsb_adjust_codepoint env = lsb_adjust Type.Char
+  let msb_adjust_codepoint = msb_adjust Type.Char
+
+  (* Checks (n < 0xD800 || 0xE000 ≤ n ≤ 0x10FFFF),
+     ensuring the codepoint range and the absence of surrogates. *)
+  let check_and_msb_adjust_codepoint env =
+    Func.share_code1 Func.Always env "Nat32->Char" ("n", I64Type) [I64Type] (fun env get_n ->
+      get_n ^^ compile_unboxed_const 0xD800L ^^
+      compile_comparison I64Op.GeU ^^
+      get_n ^^ compile_unboxed_const 0xE000L ^^
+      compile_comparison I64Op.LtU ^^
+      G.i (Binary (Wasm_exts.Values.I64 I64Op.And)) ^^
+      get_n ^^ compile_unboxed_const 0x10FFFFL ^^
+      compile_comparison I64Op.GtU ^^
+      G.i (Binary (Wasm_exts.Values.I64 I64Op.Or)) ^^
+      E.then_trap_with env "codepoint out of range" ^^
+      get_n ^^ msb_adjust_codepoint
+    )
+
+  (* Vanilla (shifted and tagged) literal for value [v] of type [ty]. *)
+  let vanilla_lit ty v =
+    Int64.(shift_left (of_int v) (to_int (shift_of_type ty)))
+    |> Int64.logor (tag_of_type ty)
+
+  (* Wrapping implementation for multiplication and exponentiation. *)
+
+  (* One factor is unshifted first so the product lands back in MSB form. *)
+  let compile_word_mul env ty =
+    lsb_adjust ty ^^
+    G.i (Binary (Wasm_exts.Values.I64 I64Op.Mul))
+
+  let compile_nat_power env ty =
+    let name = prim_fun_name ty "wpow_nat" in
+    (* Square- and multiply exponentiation *)
+    Func.share_code2 Func.Always env name (("n", I64Type), ("exp", I64Type)) [I64Type]
+      (fun env get_n get_exp ->
+        let set_n = G.setter_for get_n in
+        let set_exp = G.setter_for get_exp in
+        let (set_acc, get_acc) = new_local env "acc" in
+
+        (* unshift arguments *)
+        get_exp ^^ compile_shrU_const (shift_of_type ty) ^^ set_exp ^^
+        get_n ^^ compile_shrU_const (shift_of_type ty) ^^ set_n ^^
+
+        (* The accumulator starts with and stays shifted, so no other shifts needed. *)
+        compile_unboxed_const (const_of_type ty 1L) ^^ set_acc ^^
+
+        (* handle exp == 0 *)
+        get_exp ^^ compile_test I64Op.Eqz ^^
+        E.if1 I64Type get_acc (* done *)
+        begin
+          G.loop0 begin
+            (* Are we done? *)
+            get_exp ^^ compile_unboxed_const 1L ^^ compile_comparison I64Op.LeU ^^
+            E.if0 G.nop (* done *)
+            begin
+              (* Check low bit of exp to see if we need to multiply *)
+              get_exp ^^ compile_shl_const 63L ^^ compile_test I64Op.Eqz ^^
+              E.if0 G.nop
+              begin
+                (* Multiply! *)
+                get_acc ^^ get_n ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.Mul)) ^^ set_acc
+              end ^^
+              (* Square n, and shift exponent *)
+              get_n ^^ get_n ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.Mul)) ^^ set_n ^^
+              get_exp ^^ compile_shrU_const 1L ^^ set_exp ^^
+              (* And loop *)
+              G.i (Br (nr 1l))
+            end
+          end ^^
+          (* Multiply a last time *)
+          get_acc ^^ get_n ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.Mul))
+          (* Accumulator was shifted, so no further shift needed here *)
+        end
+      )
+
+  (* Signed power: traps on negative exponent, then uses the Nat power. *)
+  let compile_int_power env ty =
+    let name = prim_fun_name ty "wpow_int" in
+    Func.share_code2 Func.Never env name (("n", I64Type), ("exp", I64Type)) [I64Type]
+      (fun env get_n get_exp ->
+        get_exp ^^
+        compile_unboxed_const 0L ^^
+        compile_comparison I64Op.GeS ^^
+        E.else_trap_with env "negative power" ^^
+        get_n ^^ get_exp ^^ compile_nat_power env (toNat ty))
+
+  (* To rotate, first rotate a copy by bits_of_type into the other direction *)
+  let rotl env ty =
+     Func.share_code2 Func.Never env (prim_fun_name ty "rotl") (("n", I64Type), ("by", I64Type)) [I64Type]
+       (fun env get_n get_by ->
+        let open Wasm_exts.Values in
+        let beside_adjust = compile_rotr_const (Int64.of_int (bits_of_type ty)) in
+        get_n ^^ get_n ^^ beside_adjust ^^ G.i (Binary (I64 I64Op.Or)) ^^
+        get_by ^^ lsb_adjust ty ^^ clamp_shift_amount ty ^^ G.i (Binary (I64 I64Op.Rotl)) ^^
+        sanitize_word_result ty
+       )
+
+  let rotr env ty =
+     Func.share_code2 Func.Never env (prim_fun_name ty "rotr") (("n", I64Type), ("by", I64Type)) [I64Type]
+       (fun env get_n get_by ->
+        let open Wasm_exts.Values in
+        let beside_adjust = compile_rotl_const (Int64.of_int (bits_of_type ty)) in
+        get_n ^^ get_n ^^ beside_adjust ^^ G.i (Binary (I64 I64Op.Or)) ^^
+        get_by ^^ lsb_adjust ty ^^ clamp_shift_amount ty ^^ G.i (Binary (I64 I64Op.Rotr)) ^^
+        sanitize_word_result ty
+       )
+
+  (* Apply the scalar tag of [pty] (assumes the low bits are already clear). *)
+  let tag env pty =
+    match pty with
+    | Type.(Nat8 | Int8 | Nat16 | Int16 | Nat32 | Int32 | Char) ->
+      (* TODO:  could sanity check low bits clear *)
+      (* add tag *)
+      compile_bitor_const (tag_of_type pty)
+    | _ -> assert false
+
+  (* Remove (after optionally sanity-checking) the scalar tag of [pty]. *)
+  let untag env pty =
+    match pty with
+    | Type.(Nat8 | Int8 | Nat16 | Int16 | Nat32 | Int32 | Char) ->
+       (* check tag *)
+       BitTagged.sanity_check_tag __LINE__ env pty ^^
+       (* clear tag *)
+       BitTagged.clear_tag env pty
+    | _ -> assert false
+
+end (* TaggedSmallWord *)
+
+
+module Float = struct
+  (* We store floats (C doubles) in immutable boxed 64bit heap objects.
+
+     The heap layout of a Float is:
+
+       ┌──────┬─────┬─────┐
+       │ obj header │ f64 │
+       └──────┴─────┴─────┘
+
+     For now the tag stored is that of a Bits64, because the payload is
+     treated opaquely by the RTS. We'll introduce a separate tag when the need of
+     debug inspection (or GC representation change) arises.
+
+     The object header includes the object tag (Bits64) and the forwarding pointer.
+  *)
+
+  (* Word index of the f64 payload, right after the object header. *)
+  let payload_field = Tagged.header_size
+
+  let compile_unboxed_const f = G.i (Const (nr (Wasm_exts.Values.F64 f)))
+
+  (* Box an f64 from the stack into a fresh heap object; returns the pointer. *)
+  let box env = Func.share_code1 Func.Never env "box_f64" ("f", F64Type) [I64Type] (fun env get_f ->
+    let (set_i, get_i) = new_local env "boxed_f64" in
+    let size = Int64.add Tagged.header_size 2L in
+    Tagged.alloc env size Tagged.(Bits64 F) ^^
+    set_i ^^
+    get_i ^^ get_f ^^ Tagged.store_field_float64 env payload_field ^^
+    get_i ^^
+    Tagged.allocation_barrier env
+  )
+
+  (* Load the f64 payload from a boxed Float (following the forwarding pointer). *)
+  let unbox env = 
+    Tagged.load_forwarding_pointer env ^^ 
+    Tagged.(sanity_check_tag __LINE__ env (Bits64 F)) ^^
+    Tagged.load_field_float64 env payload_field
+
+  (* A shared (deduplicated) boxed Float constant. *)
+  let constant env f = Tagged.shared_object env (fun env -> 
+    compile_unboxed_const f ^^ 
+    box env)
+
+end (* Float *)
+
+
+module ReadBuf = struct
+  (*
+  Combinators to safely read from a dynamic buffer.
+
+  We represent a buffer by a pointer to two words in memory (usually allocated
+  on the shadow stack): The first is a pointer to the current position of the buffer,
+  the second one a pointer to the end (to check out-of-bounds).
+
+  Code that reads from this buffer will update the former, i.e. it is mutable.
+
+  The format is compatible with C (pointer to a struct) and avoids the need for the
+  multi-value extension that we used before to return both parse result _and_
+  updated pointer.
+
+  All pointers here are unskewed!
+
+  This module is mostly for serialization, but because there are bits of
+  serialization code in the BigNumType implementations, we put it here.
+  *)
+
+  (* Accessors for the two words of the buffer struct. *)
+  let get_ptr get_buf =
+    get_buf ^^ G.i (Load {ty = I64Type; align = 3; offset = 0L; sz = None})
+  let get_end get_buf =
+    get_buf ^^ G.i (Load {ty = I64Type; align = 3; offset = Heap.word_size; sz = None})
+  let set_ptr get_buf new_val =
+    get_buf ^^ new_val ^^ G.i (Store {ty = I64Type; align = 3; offset = 0L; sz = None})
+  let set_end get_buf new_val =
+    get_buf ^^ new_val ^^ G.i (Store {ty = I64Type; align = 3; offset = Heap.word_size; sz = None})
+  (* Set the end pointer as current position + size. *)
+  let set_size get_buf get_size =
+    set_end get_buf
+      (get_ptr get_buf ^^ get_size ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.Add)))
+
+  (* Allocate the two-word buffer struct on the shadow stack for the scope of [f]. *)
+  let alloc env f = Stack.with_words env "buf" 2L f
+
+  let advance get_buf get_delta =
+    set_ptr get_buf (get_ptr get_buf ^^ get_delta ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.Add)))
+
+  let read_leb128 env get_buf =
+    get_buf ^^ E.call_import env "rts" "leb128_decode"
+
+  let read_sleb128 env get_buf =
+    get_buf ^^ E.call_import env "rts" "sleb128_decode"
+
+  (* Trap unless [get_delta] bytes are still available. *)
+  let check_space env get_buf get_delta =
+    get_delta ^^
+    get_end get_buf ^^ get_ptr get_buf ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.Sub)) ^^
+    compile_comparison I64Op.LeU ^^
+    E.else_trap_with env "IDL error: out of bounds read"
+
+  (* Leaves non-zero on the stack iff advancing by [incr_delta] bytes would
+     cross (or reach past) a 64 KiB page boundary: ((ptr & 0xFFFF) + delta) >> 16 *)
+  let check_page_end env get_buf incr_delta =
+    get_ptr get_buf ^^ compile_bitand_const 0xFFFFL ^^
+    incr_delta ^^
+    compile_shrU_const 16L
+
+  let is_empty env get_buf =
+    get_end get_buf ^^ get_ptr get_buf ^^
+    compile_comparison I64Op.Eq
+
+  (* Fixed-width reads: bounds-check, load (zero/sign-extended to i64), advance. *)
+  let read_byte env get_buf =
+    check_space env get_buf (compile_unboxed_const 1L) ^^
+    get_ptr get_buf ^^
+    G.i (Load {ty = I32Type; align = 0; offset = 0L; sz = Some Wasm_exts.Types.(Pack8, ZX)}) ^^
+    G.i (Convert (Wasm_exts.Values.I64 I64Op.ExtendUI32)) ^^
+    advance get_buf (compile_unboxed_const 1L)
+
+  let read_word16 env get_buf =
+    check_space env get_buf (compile_unboxed_const 2L) ^^
+    get_ptr get_buf ^^
+    G.i (Load {ty = I32Type; align = 0; offset = 0L; sz = Some Wasm_exts.Types.(Pack16, ZX)}) ^^
+    G.i (Convert (Wasm_exts.Values.I64 I64Op.ExtendUI32)) ^^
+    advance get_buf (compile_unboxed_const 2L)
+
+  let read_word32 env get_buf =
+    check_space env get_buf (compile_unboxed_const 4L) ^^
+    get_ptr get_buf ^^
+    G.i (Load {ty = I32Type; align = 0; offset = 0L; sz = None}) ^^
+    G.i (Convert (Wasm_exts.Values.I64 I64Op.ExtendUI32)) ^^
+    advance get_buf (compile_unboxed_const 4L)
+
+  let read_signed_word32 env get_buf =
+    check_space env get_buf (compile_unboxed_const 4L) ^^
+    get_ptr get_buf ^^
+    G.i (Load {ty = I32Type; align = 0; offset = 0L; sz = None}) ^^
+    G.i (Convert (Wasm_exts.Values.I64 I64Op.ExtendSI32)) ^^
+    advance get_buf (compile_unboxed_const 4L)
+
+  (* Load 8 bytes without advancing; yields -1 instead when the load would
+     cross a page boundary (see check_page_end). Does not bounds-check. *)
+  let speculative_read_word64 env get_buf =
+    check_page_end env get_buf (compile_add_const 8L) ^^
+    E.if1 I64Type
+      (compile_unboxed_const (-1L))
+      begin
+        get_ptr get_buf ^^
+        G.i (Load {ty = I64Type; align = 0; offset = 0L; sz = None})
+      end
+
+  let read_word64 env get_buf =
+    check_space env get_buf (compile_unboxed_const 8L) ^^
+    get_ptr get_buf ^^
+    G.i (Load {ty = I64Type; align = 0; offset = 0L; sz = None}) ^^
+    advance get_buf (compile_unboxed_const 8L)
+
+  let read_float64 env get_buf =
+    check_space env get_buf (compile_unboxed_const 8L) ^^
+    get_ptr get_buf ^^
+    G.i (Load {ty = F64Type; align = 0; offset = 0L; sz = None}) ^^
+    advance get_buf (compile_unboxed_const 8L)
+
+  (* Copy [get_len] bytes into the destination address already on the stack. *)
+  let read_blob env get_buf get_len =
+    check_space env get_buf get_len ^^
+    (* Already has destination address on the stack *)
+    get_ptr get_buf ^^
+    get_len ^^
+    Heap.memcpy env ^^
+    advance get_buf get_len
+
+end (* ReadBuf *)
+
+
+type comparator = Lt | Le | Ge | Gt
+
+(* Interface of a big-number (Int/Nat) code-generation backend; implemented by
+   the boxed representation and wrapped by MakeCompact below. *)
+module type BigNumType =
+sig
+  (* word from SR.Vanilla, trapping, unsigned semantics *)
+  val to_word64 : E.t -> G.t
+  val to_word64_with : E.t -> G.t -> G.t (* with error message on stack (ptr/len) *)
+
+  (* word from SR.Vanilla, lossy, raw bits *)
+  val truncate_to_word32 : E.t -> G.t
+  val truncate_to_word64 : E.t -> G.t
+
+  (* unsigned word to SR.Vanilla *)
+  val from_word64 : E.t -> G.t
+
+  (* signed word to SR.Vanilla *)
+  val from_signed_word_compact : E.t -> G.t
+  val from_signed_word64 : E.t -> G.t
+
+  (* buffers *)
+  (* given a numeric object on stack (vanilla),
+     push the number (i64) of bytes necessary
+     to externalize the numeric object *)
+  val compile_data_size_signed : E.t -> G.t
+  val compile_data_size_unsigned : E.t -> G.t
+  (* given on stack
+     - numeric object (vanilla, TOS)
+     - data buffer
+    store the binary representation of the numeric object into the data buffer,
+    and push the number (i64) of bytes stored onto the stack
+   *)
+  val compile_store_to_data_buf_signed : E.t -> G.t
+  val compile_store_to_data_buf_unsigned : E.t -> G.t
+  (* given a ReadBuf on stack, consume bytes from it,
+     deserializing to a numeric object
+     and leave it on the stack (vanilla).
+     The boolean argument is true if the value to be read is signed.
+   *)
+  val compile_load_from_data_buf : E.t -> G.t -> bool -> G.t
+
+  (* constant *)
+  val constant : E.t -> Big_int.big_int -> E.shared_value
+
+  (* arithmetic *)
+  val compile_abs : E.t -> G.t
+  val compile_neg : E.t -> G.t
+  val compile_add : E.t -> G.t
+  val compile_signed_sub : E.t -> G.t
+  val compile_unsigned_sub : E.t -> G.t
+  val compile_mul : E.t -> G.t
+  val compile_signed_div : E.t -> G.t
+  val compile_signed_mod : E.t -> G.t
+  val compile_unsigned_div : E.t -> G.t
+  val compile_unsigned_rem : E.t -> G.t
+  val compile_unsigned_pow : E.t -> G.t
+  val compile_lsh : E.t -> G.t
+  val compile_rsh : E.t -> G.t
+
+  (* comparisons *)
+  val compile_eq : E.t -> G.t
+  val compile_is_negative : E.t -> G.t
+  val compile_relop : E.t -> comparator -> G.t
+
+  (* representation checks *)
+  (* given a numeric object on the stack as skewed pointer, check whether
+     it can be faithfully stored in N bits, including a leading sign bit
+     leaves boolean result on the stack
+     N must be 2..64
+   *)
+  val fits_signed_bits : E.t -> int -> G.t
+  (* given a numeric object on the stack as skewed pointer, check whether
+     it can be faithfully stored in N unsigned bits
+     leaves boolean result on the stack
+     N must be 1..64
+   *)
+  val fits_unsigned_bits : E.t -> int -> G.t
+end
+
+(* Map a bignum comparator onto the corresponding signed i64 comparison op. *)
+let i64op_from_relop relop =
+  match relop with
+  | Lt -> I64Op.LtS
+  | Le -> I64Op.LeS
+  | Ge -> I64Op.GeS
+  | Gt -> I64Op.GtS
+
+(* Shared-code function name used when compiling the given bignum comparator. *)
+let name_from_relop relop =
+  match relop with
+  | Lt -> "B_lt"
+  | Le -> "B_le"
+  | Ge -> "B_ge"
+  | Gt -> "B_gt"
+
+(* helper, measures the dynamics of the unsigned i64, returns (64 - effective bits) *)
+let unsigned_dynamics get_x =
+  get_x ^^
+  G.i (Unary (Wasm_exts.Values.I64 I64Op.Clz))
+
+(* helper, measures the dynamics of the signed i64, returns (64 - effective bits)
+   (x lsl 1) xor x folds the sign bit away so clz counts redundant sign bits *)
+let signed_dynamics get_x =
+  get_x ^^ compile_shl_const 1L ^^
+  get_x ^^
+  G.i (Binary (Wasm_exts.Values.I64 I64Op.Xor)) ^^
+  G.i (Unary (Wasm_exts.Values.I64 I64Op.Clz))
+
+(* (S)LEB128 size computation and encoding for word-sized values via the RTS. *)
+module I32Leb = struct
+  (* Number of (s)leb128 bytes: ceil(effective_bits / 7), at least 1 for zero. *)
+  let compile_size dynamics get_x =
+    get_x ^^ Bool.from_int64 ^^
+    E.if1 I64Type
+      begin
+        (* Add (7-1) to prepare division by 7 that is rounded up *)
+        compile_unboxed_const (Int64.of_int (Int.add 64 (Int.sub 7 1))) ^^
+        dynamics get_x ^^
+        G.i (Binary (Wasm_exts.Values.I64 I64Op.Sub)) ^^
+        compile_divU_const 7L
+      end
+      (compile_unboxed_const 1L)
+
+  let compile_leb128_size get_x = compile_size unsigned_dynamics get_x
+  let compile_sleb128_size get_x = compile_size signed_dynamics get_x
+
+  (* Encode into the buffer via the RTS; leaves the byte count on the stack. *)
+  let compile_store_to_data_buf_unsigned env get_x get_buf =
+    get_x ^^ get_buf ^^ 
+    E.call_import env "rts" "leb128_encode" ^^
+    compile_leb128_size get_x
+
+  let compile_store_to_data_buf_signed env get_x get_buf =
+    get_x ^^ get_buf ^^ 
+    E.call_import env "rts" "sleb128_encode" ^^
+    compile_sleb128_size get_x
+end
+
+module MakeCompact (Num : BigNumType) : BigNumType = struct
+
+  (* Compact BigNums are a representation of signed BitTagged.ubit-bignums (of the
+     underlying boxed representation `Num`), that fit into an i64 as per the
+     BitTagged representation.
+
+     Many arithmetic operations can be be performed on this right-zero-padded
+     representation directly. For some operations (e.g. multiplication) the
+     second argument needs to be furthermore right-shifted to avoid overflow.
+     Similarly, for division the result must be left-shifted.
+
+     Generally all operations begin with checking whether both arguments are
+     already tagged scalars. If so, the arithmetic can be performed in machine
+     registers (fast path). Otherwise one or both arguments need boxing and the
+     arithmetic needs to be carried out on the underlying boxed bignum
+     representation (slow path).
+
+     The result appears as a boxed number in the latter case, so a check is
+     performed if it can be a tagged scalar. Conversely in the former case the
+     64-bit result can either be a tagged scalar or needs to be boxed.
+
+     Manipulation of the result is unnecessary for the comparison predicates.
+
+     For the `pow` operation the check that both arguments are tagged scalars
+     is not sufficient. Here we count and multiply effective bitwidths to
+     figure out whether the operation will overflow 64 bits, and if so, we fall
+     back to the slow path.
+   *)
+
+  (* TODO: There is some unnecessary result shifting when the div result needs
+     to be boxed. Is this possible at all to happen? With (/-1) maybe! *)
+
+  (* TODO: Does the result of the rem/mod fast path ever needs boxing? *)
+
+  (* examine the skewed pointer and determine if number fits into ubits *)
+  let fits_in_vanilla env = Num.fits_signed_bits env (BitTagged.ubits_of Type.Int)
+
+  (* Clear the Int scalar tag bits. *)
+  let clear_tag env = BitTagged.clear_tag env Type.Int
+
+  (* A variant of BitTagged.can_tag that works on signed i64 *)
+  let if_can_tag env retty is1 is2 =
+    let ubitsL = Int64.of_int(BitTagged.ubits_of Type.Int) in
+    compile_shrS_const (Int64.sub 64L ubitsL) ^^ BitTagged.if_can_tag_signed env Type.Int retty is1 is2
+
+  (* Apply the Int scalar tag bits (low bits assumed clear). *)
+  let apply_tag env =
+    compile_bitor_const (TaggingScheme.tag_of_typ Type.Int)
+
+  (* NOTE(review): "fath" is a typo for "fast"; not renamed here because the
+     rest of this module (outside this hunk) may reference the name. *)
+  let can_use_fath_path env get_a get_b =
+    (* Check whether both arguments `a` and `b` are scalars that fit within 32 bit.
+        This is to guarantee overflow-free 64-bit arithmetics, such as `add`, `sub`, or `mul`.
+        However, this does not work for `pow` as it can overflow for smaller arguments. *)
+    (* check with a combined bit mask that:
+       - (and `0x1`) Both arguments are scalars, none a skewed pointers
+       - (and `0xFFFF_FFFF_0000_0000`) Both arguments fit in 32-bit
+    TODO: Precise tag for Int has 2 bits -> 
+       Check if we could permit one or two more bits in the `0xFFFF_FFFF_0000_0000` bit mask. *)
+    get_a ^^ get_b ^^
+    G.i (Binary (Wasm_exts.Values.I64 I64Op.Or)) ^^
+    compile_bitand_const 0xFFFF_FFFF_0000_0001L ^^
+    compile_eq_const 0x0L
+    
+  (* creates a boxed bignum from a signed i64 (arithmetically unshifting the
+     padded scalar representation first) *)
+  let box env =
+    let ubitsL = Int64.of_int(BitTagged.ubits_of Type.Int) in
+    compile_shrS_const (Int64.sub 64L ubitsL) ^^ Num.from_signed_word64 env
+
+  (* check if both arguments are tagged scalars,
+     if so, perform the fast path.
+     Otherwise make sure that both arguments are in heap representation,
+     and run the slow path on them.
+     In both cases bring the results into normal form.
+   *)
+  let try_unbox2 name fast slow env =
+    Func.share_code2 Func.Always env name (("a", I64Type), ("b", I64Type)) [I64Type]
+      (fun env get_a get_b ->
+        let set_res, get_res = new_local env "res" in
+        can_use_fath_path env get_a get_b ^^
+        E.if1 I64Type
+          begin
+            (* fast path: operate on untagged scalars, then re-tag or box *)
+            get_a ^^ clear_tag env ^^
+            get_b ^^ clear_tag env ^^
+            fast env ^^ set_res ^^
+            get_res ^^
+            if_can_tag env [I64Type]
+              (get_res ^^ apply_tag env)
+              (get_res ^^ box env)
+          end
+          begin
+            (* slow path: box any scalar argument, then run the bignum op *)
+            get_a ^^ BitTagged.if_tagged_scalar env [I64Type]
+              (get_a ^^ box env)
+              get_a ^^
+            get_b ^^ BitTagged.if_tagged_scalar env [I64Type]
+              (get_b ^^ box env)
+              get_b ^^
+            slow env ^^ set_res ^^ get_res ^^
+            fits_in_vanilla env ^^
+            E.if1 I64Type
+              (get_res ^^ Num.truncate_to_word64 env ^^ BitTagged.tag env Type.Int)
+              get_res
+          end
+      )
+
+  let compile_add = try_unbox2 "B_add" Word64.compile_add Num.compile_add
+
+  (* Unshift the second (TOS) argument before the op, so the result of the
+     fast-path op comes out in the shifted scalar representation. *)
+  let adjust_arg2 code env =
+    compile_shrS_const (Int64.of_int (64 - BitTagged.ubits_of Type.Int)) ^^
+    code env (* TBR *)
+  (* Re-shift the fast-path result back up into the padded representation. *)
+  let adjust_result code env =
+    code env ^^
+    compile_shl_const (Int64.of_int (64 - BitTagged.ubits_of Type.Int))
+
+  let compile_mul = try_unbox2 "B_mul" (adjust_arg2 Word64.compile_mul) Num.compile_mul
+  let compile_signed_sub = try_unbox2 "B+sub" Word64.compile_signed_sub Num.compile_signed_sub
+  let compile_signed_div = try_unbox2 "B+div" (adjust_result Word64.compile_signed_div) Num.compile_signed_div
+  let compile_signed_mod = try_unbox2 "B_mod" Word64.compile_signed_mod Num.compile_signed_mod
+  let compile_unsigned_div = try_unbox2 "B_div" (adjust_result Word64.compile_unsigned_div) Num.compile_unsigned_div
+  let compile_unsigned_rem = try_unbox2 "B_rem" Word64.compile_unsigned_rem Num.compile_unsigned_rem
+  let compile_unsigned_sub = try_unbox2 "B_sub" Word64.compile_unsigned_sub Num.compile_unsigned_sub
+
+  (* Pow cannot use the generic try_unbox2 fast path (see module header note);
+     even two small scalars may overflow, so both branches go through Num. *)
+  let compile_unsigned_pow env =
+    Func.share_code2 Func.Always env "B_pow" (("a", I64Type), ("b", I64Type)) [I64Type]
+    (fun env get_a get_b ->
+    let set_res, get_res = new_local env "res" in
+    get_a ^^ get_b ^^
+    BitTagged.if_both_tagged_scalar env [I64Type]
+      begin
+        let set_a = G.setter_for get_a in
+        let set_b = G.setter_for get_b in
+        (* Convert to plain Word64 *)
+        get_a ^^ BitTagged.untag __LINE__ env Type.Int ^^ set_a ^^
+        get_b ^^ BitTagged.untag __LINE__ env Type.Int ^^ set_b ^^
+
+        get_a ^^ Num.from_signed_word64 env ^^
+        get_b ^^ Num.from_signed_word64 env ^^
+        Num.compile_unsigned_pow env ^^ set_res ^^
+        get_res ^^ fits_in_vanilla env ^^
+        E.if1 I64Type
+          (get_res ^^ Num.truncate_to_word64 env ^^ BitTagged.tag env Type.Int)
+          get_res
+      end
+      begin
+        (* at least one heap bignum: box any scalar argument first *)
+        get_a ^^ BitTagged.if_tagged_scalar env [I64Type]
+          (get_a ^^ box env)
+          get_a ^^
+        get_b ^^ BitTagged.if_tagged_scalar env [I64Type]
+          (get_b ^^ box env)
+          get_b ^^
+        Num.compile_unsigned_pow env ^^ set_res ^^
+        get_res ^^ fits_in_vanilla env ^^
+        E.if1 I64Type
+          (get_res ^^ Num.truncate_to_word64 env ^^ BitTagged.tag env Type.Int)
+          get_res
+      end)
+
+  (*
+    Note [left shifting compact Nat]
+    For compact Nats with a number fitting in 32 bits (in scalar value representation) and a shift amount of 
+    less or equal 32, we perform a fast shift. Otherwise, the bignum shift via RTS is applied.
+   *)
+  let compile_lsh env =
+    Func.share_code2 Func.Always env "B_lsh" (("n", I64Type), ("amount", I64Type)) [I64Type]
+    (fun env get_n get_amount ->
+      let set_n = G.setter_for get_n in
+      (* normalize the shift amount to its LSB value *)
+      get_amount ^^ TaggedSmallWord.lsb_adjust Type.Nat32 ^^ G.setter_for get_amount ^^
+      get_n ^^
+      BitTagged.if_tagged_scalar env [I64Type]
+      begin
+        (* see Note [left shifting compact Nat] *)
+        get_n ^^ BitTagged.untag __LINE__ env Type.Int ^^ set_n ^^
+        get_n ^^
+        compile_bitand_const 0xFFFF_FFFF_0000_0000L ^^
+        compile_eq_const 0L ^^
+        get_amount ^^ compile_rel_const I64Op.LeU 32L ^^
+        G.i (Binary (Wasm_exts.Values.I64 I64Op.And)) ^^
+        E.if1 I64Type
+        begin
+          get_n ^^ 
+          get_amount ^^ 
+          G.i (Binary (Wasm_exts.Values.I64 I64Op.Shl)) ^^
+          BitTagged.tag env Type.Int
+        end
+        begin
+          get_n ^^ Num.from_word64 env ^^ 
+          get_amount ^^ 
+          Num.compile_lsh env
+        end
+      end
+      begin
+        get_n ^^ get_amount ^^ Num.compile_lsh env
+      end)
+
+  (* Right shift: fast scalar path shifts in place and masks back to the
+     padded representation; shifts wider than ubits yield 0 branch-free. *)
+  let compile_rsh env =
+    Func.share_code2 Func.Always env "B_rsh" (("n", I64Type), ("amount", I64Type)) [I64Type]
+    (fun env get_n get_amount ->
+      get_amount ^^ TaggedSmallWord.lsb_adjust Type.Nat32 ^^ G.setter_for get_amount ^^
+      get_n ^^
+      BitTagged.if_tagged_scalar env [I64Type]
+        begin
+          get_n ^^ clear_tag env ^^
+          get_amount ^^
+          G.i (Binary (Wasm_exts.Values.I64 I64Op.ShrU)) ^^
+          compile_bitand_const Int64.(shift_left minus_one (64 - BitTagged.ubits_of Type.Int)) ^^
+          get_amount ^^ compile_rel_const I64Op.LeU (Int64.of_int (BitTagged.ubits_of Type.Int))^^
+          G.i (Binary (Wasm_exts.Values.I64 I64Op.Mul)) (* branch-free `if` *) ^^
+          (* tag *)
+          apply_tag env
+        end
+        begin
+          get_n ^^ get_amount ^^ Num.compile_rsh env ^^
+          let set_res, get_res = new_local env "res" in
+          set_res ^^ get_res ^^
+          fits_in_vanilla env ^^
+          E.if1 I64Type
+            (get_res ^^ Num.truncate_to_word64 env ^^ BitTagged.tag env Type.Int)
+            get_res
+        end)
+
+  (* Sign test: for scalars a signed comparison of the cleared-tag value
+     against 0 suffices; heap bignums go through Num. *)
+  let compile_is_negative env =
+    let set_n, get_n = new_local env "n" in
+    set_n ^^ get_n ^^
+    BitTagged.if_tagged_scalar env [I64Type]
+      (get_n ^^ clear_tag env ^^ compile_unboxed_const 0L ^^ compile_comparison I64Op.LtS)
+      (get_n ^^ Num.compile_is_negative env)
+
+  (* Compile-time constant: taggable small values become immediate vanilla
+     scalars, everything else falls back to the boxed representation. *)
+  let constant env = function
+    | n when Big_int.is_int_big_int n && BitTagged.can_tag_const Type.Int (Big_int.int64_of_big_int n) ->
+      E.Vanilla (BitTagged.tag_const Type.Int (Big_int.int64_of_big_int n))
+    | n -> Num.constant env n
+
+  (* Negation: the most negative taggable value (-2^sbits) has no taggable
+     negation, so that one case is boxed; all other scalars negate in place. *)
+  let compile_neg env =
+    let sminl = Int64.shift_left 1L (BitTagged.sbits_of Type.Int) in
+    let sminl_shifted = Int64.shift_left sminl (64 - BitTagged.ubits_of Type.Int) in
+    Func.share_code1 Func.Always env "B_neg" ("n", I64Type) [I64Type] (fun env get_n ->
+      get_n ^^ BitTagged.if_tagged_scalar env [I64Type]
+        begin
+          get_n ^^ clear_tag env ^^ compile_eq_const sminl_shifted ^^ (* -2^sbits, shifted ubits *)
+          E.if1 I64Type
+            (compile_unboxed_const sminl ^^ Num.from_word64 env)
+            begin
+              compile_unboxed_const 0L ^^
+              get_n ^^ clear_tag env ^^
+              G.i (Binary (Wasm_exts.Values.I64 I64Op.Sub)) ^^
+              (* tag the result *)
+              clear_tag env ^^
+              apply_tag env
+            end
+        end
+        (get_n ^^ Num.compile_neg env)
+    )
+
+  (* Like try_unbox2 but for comparison predicates: no result normalization
+     is needed, since the result is a boolean. *)
+  let try_comp_unbox2 name fast slow env =
+    Func.share_code2 Func.Always env name (("a", I64Type), ("b", I64Type)) [I64Type]
+      (fun env get_a get_b ->
+        get_a ^^ get_b ^^
+        BitTagged.if_both_tagged_scalar env [I64Type]
+          begin
+            get_a ^^ clear_tag env ^^
+            get_b ^^ clear_tag env ^^
+            fast env
+          end
+          begin
+            get_a ^^ BitTagged.if_tagged_scalar env [I64Type]
+              (get_a ^^ box env)
+              get_a ^^
+            get_b ^^ BitTagged.if_tagged_scalar env [I64Type]
+              (get_b ^^ box env)
+              get_b ^^
+            slow env
+          end)
+
+  (* Equality: identical bit patterns are equal; two distinct scalars are
+     unequal; otherwise fall back to the structural bignum comparison. *)
+  let compile_eq env =
+    Func.share_code2 Func.Always env "B_eq" (("a", I64Type), ("b", I64Type)) [I64Type]
+      (fun env get_a get_b ->
+        get_a ^^ get_b ^^
+        compile_comparison I64Op.Eq ^^
+        E.if1 I64Type
+          (Bool.lit true)
+          (get_a ^^ get_b ^^
+           BitTagged.if_both_tagged_scalar env [I64Type]
+             (Bool.lit false)
+             begin
+               get_a ^^ BitTagged.if_tagged_scalar env [I64Type]
+                 (get_a ^^ box env)
+                 get_a ^^
+               get_b ^^ BitTagged.if_tagged_scalar env [I64Type]
+                 (get_b ^^ box env)
+                 get_b ^^
+               Num.compile_eq env
+             end))
+
+  (* Ordering comparison: plain i64 compare on the (cleared) scalar
+     representations in the fast case, Num.compile_relop otherwise. *)
+  let compile_relop env bigintop =
+    try_comp_unbox2 (name_from_relop bigintop)
+      (fun env' -> Word64.compile_relop env' (i64op_from_relop bigintop))
+      (fun env' -> Num.compile_relop env' bigintop)
+      env
+
+  (* Unary dispatcher: [fast] receives the still-tagged scalar on the stack,
+     [slow] receives the heap pointer. *)
+  let try_unbox iN fast slow env =
+    let set_a, get_a = new_local env "a" in
+    set_a ^^ get_a ^^
+    BitTagged.if_tagged_scalar env [iN]
+      (get_a ^^ fast env)
+      (get_a ^^ slow env)
+
+  (* Does the value fit into [n] unsigned bits? Tagged scalars are tested by
+     masking; boxed values ask the bignum RTS. *)
+  let fits_unsigned_bits env n =
+    try_unbox I64Type (fun _ -> match n with
+        | 64 -> G.i Drop ^^ Bool.lit true
+        | 8 | 16 | 32 ->
+          (* Use shifting to test that the payload including the tag fits the
+             desired bit width, e.g. n + (64 - ubits) for Type.Int. *)
+          compile_bitand_const Int64.(shift_left minus_one (n + (64 - BitTagged.ubits_of Type.Int))) ^^
+          compile_test I64Op.Eqz
+        | _ -> assert false
+      )
+      (fun env -> Num.fits_unsigned_bits env n)
+      env
+
+  (* Debug/sanity-mode cross-check for [fits_signed_bits]: recomputes the
+     range test on the untagged value and traps on disagreement with the fast
+     result. Compiles to a nop when neither debug mode nor --sanity is set. *)
+  let sanity_check_fits_signed_bits env n get_a =
+     if TaggingScheme.debug || !Flags.sanity then
+     get_a ^^
+     Func.share_code2 Func.Always env ("check_fits_signed_bits_"^Int.to_string n) (("res", I64Type), ("a", I64Type)) [I64Type]
+      (fun env get_res get_a ->
+         let lower_bound = Int64.(neg (shift_left 1L (n-1))) in
+         let upper_bound = Int64.shift_left 1L (n-1) in
+         let set_a = G.setter_for get_a in
+         get_a ^^
+         (* arithmetic shift recovers the signed scalar value *)
+         compile_shrS_const (Int64.of_int (64 - BitTagged.ubits_of Type.Int)) ^^
+         set_a ^^
+         compile_unboxed_const lower_bound ^^
+         get_a ^^
+         compile_comparison I64Op.LeS ^^
+         get_a ^^ compile_unboxed_const upper_bound ^^
+         compile_comparison I64Op.LtS ^^
+         G.i (Binary (Wasm_exts.Values.I64 I64Op.And)) ^^
+         get_res ^^
+         compile_comparison I64Op.Eq ^^
+         E.else_trap_with env ("fit_signed_bits failure_"^Int.to_string n) ^^
+         get_res)
+     else G.nop
+
+  (* Does the value fit into [n] signed bits? For tagged scalars, xor-ing the
+     value with itself shifted right by one folds sign and magnitude so a
+     single mask tests the (n-1)-bit range; boxed values ask the bignum RTS. *)
+  let fits_signed_bits env n =
+    let set_a, get_a = new_local env "a" in
+    try_unbox I64Type (fun _ -> match n with
+        | 64 -> G.i Drop ^^ Bool.lit true
+        | 8 | 16 | 32 ->
+           set_a ^^
+           get_a ^^ get_a ^^ compile_shrS_const 1L ^^
+           G.i (Binary (Wasm_exts.Values.I64 I64Op.Xor)) ^^
+           compile_bitand_const
+             Int64.(shift_left minus_one ((n-1) + (64 - BitTagged.ubits_of Type.Int))) ^^
+           compile_test I64Op.Eqz ^^
+           sanity_check_fits_signed_bits env n get_a
+        | _ -> assert false
+      )
+      (fun env -> Num.fits_signed_bits env n)
+      env
+
+  (* Absolute value. Non-negative tagged scalars only need their tag restored;
+     negative ones are negated on the shifted representation, except for
+     -2^sbits whose absolute value overflows the compact range and is reboxed. *)
+  let compile_abs env =
+    let sminl = Int64.shift_left 1L (BitTagged.sbits_of Type.Int) in
+    let sminl_shifted = Int64.shift_left sminl (64 - BitTagged.ubits_of Type.Int) in
+    try_unbox I64Type
+      begin
+        fun _ ->
+        let set_a, get_a = new_local env "a" in
+        clear_tag env ^^
+        set_a ^^
+        get_a ^^ compile_unboxed_const 0L ^^ compile_comparison I64Op.LtS ^^
+        E.if1 I64Type
+          begin
+            get_a ^^
+            (* -2^sbits is small enough for compact representation, but 2^sbits isn't *)
+            compile_eq_const sminl_shifted ^^ (* i.e. -2^sbits shifted *)
+            E.if1 I64Type
+              (compile_unboxed_const sminl ^^ Num.from_word64 env)
+              begin
+                (* absolute value works directly on shifted representation *)
+                compile_unboxed_const 0L ^^
+                get_a ^^
+                G.i (Binary (Wasm_exts.Values.I64 I64Op.Sub)) ^^
+                apply_tag env
+              end
+          end
+          begin
+            get_a ^^
+            compile_bitor_const (TaggingScheme.tag_of_typ Type.Int)
+          end
+      end
+      Num.compile_abs
+      env
+
+  (* Decode a (s)leb128-encoded number that is known to fit into 64 bits,
+     using the speculatively read word (see Note [speculating for short
+     (S)LEB encoded bignums]). The bool selects signed (sleb128) decoding. *)
+  let compile_load_from_word64 env get_data_buf = function
+    | false -> get_data_buf ^^ E.call_import env "rts" "bigint_leb128_decode_word64"
+    | true -> get_data_buf ^^ E.call_import env "rts" "bigint_sleb128_decode_word64"
+
+  let compile_load_from_data_buf env get_data_buf signed =
+    (* see Note [speculating for short (S)LEB encoded bignums] *)
+    (* Read 8 bytes ahead; after complementing and masking the continuation
+       bits (0x80 per byte), a set bit marks an end-of-number byte within the
+       word. Eqz (no terminator in the word) forces the general slow path. *)
+    ReadBuf.speculative_read_word64 env get_data_buf ^^
+    let set_a, get_a = new_local env "a" in
+    set_a ^^ get_a ^^
+    compile_xor_const (-1L) ^^
+    compile_bitand_const 0x8080_8080_8080_8080L ^^
+    let set_eom, get_eom = new_local env "eom" in
+    set_eom ^^ get_eom ^^
+    compile_test I64Op.Eqz ^^
+    E.if1 I64Type
+      begin
+        Num.compile_load_from_data_buf env get_data_buf signed
+      end
+      begin
+        (* fast path: pass the word and the bit position of the terminator *)
+        get_a ^^
+        get_eom ^^ G.i (Unary (Wasm_exts.Values.I64 I64Op.Ctz)) ^^
+        compile_load_from_word64 env get_data_buf signed
+      end
+
+  (* Serialize (buf, num) to leb128 at buf; returns the number of bytes
+     written. Tagged scalars are untagged and encoded inline via I32Leb. *)
+  let compile_store_to_data_buf_unsigned env =
+    let set_x, get_x = new_local env "x" in
+    let set_buf, get_buf = new_local env "buf" in
+    set_x ^^ set_buf ^^
+    get_x ^^
+    try_unbox I64Type
+      (fun env ->
+        BitTagged.untag __LINE__ env Type.Int ^^ set_x ^^
+        I32Leb.compile_store_to_data_buf_unsigned env get_x get_buf
+      )
+      (fun env ->
+        G.i Drop ^^
+        get_buf ^^ get_x ^^ Num.compile_store_to_data_buf_unsigned env)
+      env
+
+  (* Signed counterpart of compile_store_to_data_buf_unsigned (sleb128). *)
+  let compile_store_to_data_buf_signed env =
+    let set_x, get_x = new_local env "x" in
+    let set_buf, get_buf = new_local env "buf" in
+    set_x ^^ set_buf ^^
+    get_x ^^
+    try_unbox I64Type
+      (fun env ->
+        BitTagged.untag __LINE__ env Type.Int ^^ set_x ^^
+        I32Leb.compile_store_to_data_buf_signed env get_x get_buf
+      )
+      (fun env ->
+        G.i Drop ^^
+        get_buf ^^ get_x ^^ Num.compile_store_to_data_buf_signed env)
+      env
+
+  (* Size in bytes of the leb128 encoding of the value on the stack. *)
+  let compile_data_size_unsigned env =
+    try_unbox I64Type
+      (fun _ ->
+        let set_x, get_x = new_local env "x" in
+        BitTagged.untag __LINE__ env Type.Int ^^ set_x ^^
+        I32Leb.compile_leb128_size get_x
+      )
+      (fun env -> Num.compile_data_size_unsigned env)
+      env
+
+  (* Size in bytes of the sleb128 encoding of the value on the stack. *)
+  let compile_data_size_signed env =
+    try_unbox I64Type
+      (fun _ ->
+        let set_x, get_x = new_local env "x" in
+        BitTagged.untag __LINE__ env Type.Int ^^ set_x ^^
+        I32Leb.compile_sleb128_size get_x
+      )
+      (fun env -> Num.compile_data_size_signed env)
+      env
+
+  (* Signed i64 -> compact Int: tag when the value fits, else box via Num. *)
+  let from_signed_word64 env =
+    let set_a, get_a = new_local env "a" in
+    set_a ^^
+    get_a ^^ BitTagged.if_can_tag_signed env Type.Int [I64Type]
+      (get_a ^^ BitTagged.tag env Type.Int)
+      (get_a ^^ Num.from_signed_word64 env)
+
+  (* Tag a signed i64 that the caller guarantees fits the compact range.
+     Debug/--sanity builds verify the claim and trap on violation. *)
+  let from_signed_word_compact env =
+    begin
+      if TaggingScheme.debug || !(Flags.sanity)
+      then
+      let set_a, get_a = new_local env "a" in
+      set_a ^^
+      get_a ^^ BitTagged.if_can_tag_signed env Type.Int [I64Type]
+        get_a
+        (E.trap_with env "from_signed_word_compact")
+      else G.nop
+    end ^^
+    BitTagged.tag env Type.Int
+
+  (* Unsigned i64 -> compact Int: tag when the value fits, else box via Num. *)
+  let from_word64 env =
+    let set_a, get_a = new_local env "a" in
+    set_a ^^
+    get_a ^^ BitTagged.if_can_tag_unsigned env Type.Int [I64Type]
+      (get_a ^^ BitTagged.tag env Type.Int)
+      (get_a ^^ Num.from_word64 env)
+
+  (* Conversions from compact Int back to machine words: untag the scalar in
+     the fast case, otherwise defer to the corresponding Num conversion. *)
+  let truncate_to_word64 env =
+    let set_a, get_a = new_local env "a" in
+    set_a ^^ get_a ^^
+    BitTagged.if_tagged_scalar env [I64Type]
+      (get_a ^^ BitTagged.untag __LINE__ env Type.Int)
+      (get_a ^^ Num.truncate_to_word64 env)
+
+  let truncate_to_word32 env =
+    let set_a, get_a = new_local env "a" in
+    set_a ^^ get_a ^^
+    BitTagged.if_tagged_scalar env [I64Type]
+      (get_a ^^ BitTagged.untag __LINE__ env Type.Int)
+      (get_a ^^ Num.truncate_to_word32 env)
+
+  let to_word64 env =
+    let set_a, get_a = new_local env "a" in
+    set_a ^^ get_a ^^
+    BitTagged.if_tagged_scalar env [I64Type]
+      (get_a ^^ BitTagged.untag __LINE__ env Type.Int)
+      (get_a ^^ Num.to_word64 env)
+
+  (* Like to_word64, but traps with the given message on out-of-range input. *)
+  let to_word64_with env get_err_msg =
+    let set_a, get_a = new_local env "a" in
+    set_a ^^ get_a ^^
+    BitTagged.if_tagged_scalar env [I64Type]
+      (get_a ^^ BitTagged.untag __LINE__ env Type.Int)
+      (get_a ^^ Num.to_word64_with env get_err_msg)
+end
+
+(* BigNumType implementation that defers all arithmetic to the libtommath
+   bignum functions imported from the RTS; values are heap BigInt objects. *)
+module BigNumLibtommath : BigNumType = struct
+
+  let to_word64 env = E.call_import env "rts" "bigint_to_word64_trap"
+  let to_word64_with env get_err_msg = get_err_msg ^^ E.call_import env "rts" "bigint_to_word64_trap_with"
+
+  let truncate_to_word32 env = E.call_import env "rts" "bigint_to_word32_wrap" ^^ G.i (Convert (Wasm_exts.Values.I64 I64Op.ExtendUI32))
+  let truncate_to_word64 env = E.call_import env "rts" "bigint_to_word64_wrap"
+
+  let from_signed_word_compact env = E.call_import env "rts" "bigint_of_int64"
+  let from_word64 env = E.call_import env "rts" "bigint_of_word64"
+  let from_signed_word64 env = E.call_import env "rts" "bigint_of_int64"
+
+  let compile_data_size_unsigned env = E.call_import env "rts" "bigint_leb128_size"
+  let compile_data_size_signed env = E.call_import env "rts" "bigint_sleb128_size"
+
+  (* Encode to leb128 at buf; leaves the number of bytes written. *)
+  let compile_store_to_data_buf_unsigned env =
+    let (set_buf, get_buf) = new_local env "buf" in
+    let (set_n, get_n) = new_local env "n" in
+    set_n ^^ set_buf ^^
+    get_n ^^ get_buf ^^ E.call_import env "rts" "bigint_leb128_encode" ^^
+    get_n ^^ E.call_import env "rts" "bigint_leb128_size"
+
+  (* Encode to sleb128 at buf; leaves the number of bytes written. *)
+  let compile_store_to_data_buf_signed env =
+    let (set_buf, get_buf) = new_local env "buf" in
+    let (set_n, get_n) = new_local env "n" in
+    set_n ^^ set_buf ^^
+    get_n ^^ get_buf ^^ E.call_import env "rts" "bigint_sleb128_encode" ^^
+    get_n ^^ E.call_import env "rts" "bigint_sleb128_size"
+
+  let compile_load_from_data_buf env get_data_buf = function
+    | false -> get_data_buf ^^ E.call_import env "rts" "bigint_leb128_decode"
+    | true -> get_data_buf ^^ E.call_import env "rts" "bigint_sleb128_decode"
+
+  (* Build a static BigInt heap object laid out as libtommath's mp_int,
+     with the magnitude split into 60-bit limbs. *)
+  let constant env n =
+    (* See enum mp_sign *)
+    let sign = if Big_int.sign_big_int n >= 0 then 0l else 1l in
+
+    let n = Big_int.abs_big_int n in
+
+    let limbs =
+      (* see MP_DIGIT_BIT for MP_64BIT *)
+      let twoto60 = Big_int.power_int_positive_int 2 60 in
+      let rec go n =
+        if Big_int.sign_big_int n = 0
+        then []
+        else
+          let (a, b) = Big_int.quomod_big_int n twoto60 in
+          StaticBytes.[ I64 (Big_int.int64_of_big_int b) ] @ go a
+      in go n
+    in
+
+    (* how many 64 bit digits *)
+    let size = Int32.of_int (List.length limbs) in
+
+    (* cf. mp_int in tommath.h *)
+    (* Tom's math library is compiled with 64-bit `mp_digit` size. *)
+    let payload = StaticBytes.[
+      I32 size; (* used *)
+      I32 size; (* size; relying on Heap.word_size == size_of(mp_digit) *)
+      I32 sign;
+      I32 0l; (* padding because of 64-bit alignment of subsequent pointer *)
+      I64 0L; (* dp; this will be patched in BigInt::mp_int_ptr in the RTS when used *)
+    ] @ limbs
+    in
+
+    Tagged.shared_object env (fun env ->
+      let instructions =
+        let words = StaticBytes.as_words payload in
+        List.map compile_unboxed_const words in
+      Tagged.obj env Tagged.BigInt instructions
+    )
+
+  (* Trap on negative results of Nat subtraction; passes the value through. *)
+  let assert_nonneg env =
+    Func.share_code1 Func.Never env "assert_nonneg" ("n", I64Type) [I64Type] (fun env get_n ->
+      get_n ^^
+      E.call_import env "rts" "bigint_isneg" ^^ Bool.from_rts_int32 ^^
+      E.then_trap_with env "Natural subtraction underflow" ^^
+      get_n
+    )
+
+  let compile_abs env = E.call_import env "rts" "bigint_abs"
+  let compile_neg env = E.call_import env "rts" "bigint_neg"
+  let compile_add env = E.call_import env "rts" "bigint_add"
+  let compile_mul env = E.call_import env "rts" "bigint_mul"
+  let compile_signed_sub env = E.call_import env "rts" "bigint_sub"
+  let compile_signed_div env = E.call_import env "rts" "bigint_div"
+  let compile_signed_mod env = E.call_import env "rts" "bigint_rem"
+  let compile_unsigned_sub env = E.call_import env "rts" "bigint_sub" ^^ assert_nonneg env
+  let compile_unsigned_rem env = E.call_import env "rts" "bigint_rem"
+  let compile_unsigned_div env = E.call_import env "rts" "bigint_div"
+  let compile_unsigned_pow env = E.call_import env "rts" "bigint_pow"
+  let compile_lsh env = E.call_import env "rts" "bigint_lsh"
+  let compile_rsh env = E.call_import env "rts" "bigint_rsh"
+
+  let compile_eq env = E.call_import env "rts" "bigint_eq" ^^ Bool.from_rts_int32
+  let compile_is_negative env = E.call_import env "rts" "bigint_isneg" ^^ Bool.from_rts_int32
+  let compile_relop env = function
+      | Lt -> E.call_import env "rts" "bigint_lt" ^^ Bool.from_rts_int32
+      | Le -> E.call_import env "rts" "bigint_le" ^^ Bool.from_rts_int32
+      | Ge -> E.call_import env "rts" "bigint_ge" ^^ Bool.from_rts_int32
+      | Gt -> E.call_import env "rts" "bigint_gt" ^^ Bool.from_rts_int32
+
+  let fits_signed_bits env bits =
+    E.call_import env "rts" "bigint_2complement_bits" ^^
+    compile_unboxed_const (Int64.of_int bits) ^^
+    compile_comparison I64Op.LeU
+  let fits_unsigned_bits env bits =
+    E.call_import env "rts" "bigint_count_bits" ^^
+    compile_unboxed_const (Int64.of_int bits) ^^
+    compile_comparison I64Op.LeU
+
+end (* BigNumLibtommath *)
+
+(* The bignum implementation used by the compiler: values that fit the compact
+   range are tagged scalars; the rest fall back to libtommath BigInts. *)
+module BigNum = MakeCompact(BigNumLibtommath)
+
+(* Primitive functions *)
+module Prim = struct
+  (* The {Nat,Int}{8,16,32} bits sit in the MSBs of the i64, in this manner
+     we can perform almost all operations, with the exception of
+     - Mul (needs shr of one operand)
+     - Shr (needs masking of result)
+     - Rot (needs duplication into LSBs, masking of amount and masking of result)
+     - ctz (needs shr of operand or sub from result)
+
+     Both {Nat,Int}{8,16,32} fit into the vanilla stackrep, so no boxing is necessary.
+     This MSB-stored schema is also essentially what the interpreter is using.
+  *)
+  let prim_word64toNat = BigNum.from_word64
+  (* shift the N-bit word out of the MSBs, then convert to Nat *)
+  let prim_shiftWordNtoUnsigned env b =
+    compile_shrU_const b ^^
+    prim_word64toNat env
+  let prim_word64toInt = BigNum.from_signed_word64
+  (* arithmetic shift preserves the sign before converting to Int *)
+  let prim_shiftWordNtoSigned env b =
+    compile_shrS_const b ^^
+    prim_word64toInt env
+  let prim_intToWord64 = BigNum.truncate_to_word64
+  let prim_intToWordNShifted env b =
+    prim_intToWord64 env ^^
+    TaggedSmallWord.shift_leftWordNtoI64 b
+end (* Prim *)
+
+module Blob = struct
+  (* The layout of a blob object is
+
+     ┌──────┬─────┬─────────┬──────────────────┐
+     │ obj header │ n_bytes │ bytes (padded) … │
+     └──────┴─────┴─────────┴──────────────────┘
+
+    The object header includes the object tag (Blob) and the forwarding pointer.
+
+    This heap object is used for various kinds of binary, non-pointer data.
+
+    When used for Text values, the bytes are UTF-8 encoded code points from
+    Unicode.
+  *)
+
+  (* Header plus the length word. *)
+  let header_size = Int64.add Tagged.header_size 1L
+  let len_field = Int64.add Tagged.header_size 0L
+
+  (* Byte length of the blob on the stack (follows forwarding). *)
+  let len env =
+    Tagged.load_forwarding_pointer env ^^
+    Tagged.load_field env len_field
+
+  (* Byte length as a (compact) Nat value. *)
+  let len_nat env =
+    Func.share_code1 Func.Never env "blob_len" ("text", I64Type) [I64Type] (fun env get ->
+      get ^^
+      len env ^^
+      BigNum.from_word64 env
+    )
+
+  (* Allocate an uninitialized blob of [len] bytes with the given sort tag. *)
+  let alloc env sort len =
+    compile_unboxed_const Tagged.(int_of_tag (Blob sort)) ^^
+    len ^^
+    E.call_import env "rts" "alloc_blob" ^^
+    (* uninitialized blob payload is allowed by the barrier *)
+    Tagged.allocation_barrier env
+
+  let unskewed_payload_offset env = Int64.(add ptr_unskew (mul Heap.word_size header_size))
+
+  (* Unskewed address of the first payload byte. *)
+  let payload_ptr_unskewed env =
+    Tagged.load_forwarding_pointer env ^^
+    compile_add_const (unskewed_payload_offset env)
+
+  (* Allocate a blob and fill it from a passive data segment (memory.init). *)
+  let load_data_segment env sort segment_index data_length =
+    let (set_blob, get_blob) = new_local env "data_segment_blob" in
+    alloc env sort data_length ^^ set_blob ^^
+    get_blob ^^ payload_ptr_unskewed env ^^ (* target address *)
+    compile_const_32 0l ^^ (* data offset *)
+    data_length ^^ G.i (Convert (Wasm_exts.Values.I32 I32Op.WrapI64)) ^^
+    G.i (MemoryInit (nr segment_index)) ^^
+    get_blob
+
+  (* Shared (deduplicated) blob constant backed by a data segment. *)
+  let constant env sort payload =
+    Tagged.shared_object env (fun env ->
+      let blob_length = Int64.of_int (String.length payload) in
+      let segment_index = E.add_static env StaticBytes.[Bytes payload] in
+      load_data_segment env sort segment_index (compile_unboxed_const blob_length)
+    )
+
+  let lit env sort payload =
+    Tagged.materialize_shared_value env (constant env sort payload)
+
+  (* Leaves (payload pointer, byte length) on the stack. *)
+  let as_ptr_len env = Func.share_code1 Func.Never env "as_ptr_size" ("x", I64Type) [I64Type; I64Type] (
+    fun env get_x ->
+      get_x ^^ payload_ptr_unskewed env ^^
+      get_x ^^ len env
+    )
+
+  let lit_ptr_len env sort s =
+    lit env sort s ^^
+    as_ptr_len env
+
+  (* NOTE(review): a second, byte-identical definition of [load_data_segment]
+     previously appeared here, silently shadowing the earlier binding in this
+     module. It was dead code and has been removed; all uses resolve to the
+     definition above. *)
+
+  (* Allocate a fresh blob of [size] bytes and memcpy its contents from [ptr]. *)
+  let of_ptr_size env = Func.share_code2 Func.Always env "blob_of_ptr_size" (("ptr", I64Type), ("size" , I64Type)) [I64Type] (
+    fun env get_ptr get_size ->
+      let (set_x, get_x) = new_local env "x" in
+      alloc env Tagged.B get_size ^^ set_x ^^
+      get_x ^^ payload_ptr_unskewed env ^^
+      get_ptr ^^
+      get_size ^^
+      Heap.memcpy env ^^
+      get_x
+    )
+
+  (* Copy a blob into a fresh one, converting its sort from src to dst.
+     Shared code is specialized per (src_sort, dst_sort) pair. *)
+  let copy env src_sort dst_sort =
+    let name = Printf.sprintf "blob_copy_%s_%s"
+                 (Int64.to_string (Tagged.int_of_tag (Tagged.Blob src_sort)))
+                 (Int64.to_string (Tagged.int_of_tag (Tagged.Blob dst_sort)))
+    in
+    Func.share_code1 Func.Never env name ("src", I64Type) [I64Type] (
+      fun env get_src ->
+       let (set_dst, get_dst) = new_local env "dst" in
+       alloc env dst_sort (get_src ^^ len env) ^^ set_dst ^^
+       get_dst ^^ payload_ptr_unskewed env ^^
+       get_src ^^ Tagged.sanity_check_tag __LINE__ env (Tagged.Blob src_sort) ^^
+       as_ptr_len env ^^
+       Heap.memcpy env ^^
+       get_dst
+    )
+
+  (* Allocate a blob sized by [get_size_fun] and fill it with [copy_fun]
+     (dest pointer, source offset, length are pushed before the call). *)
+  let of_size_copy env sort get_size_fun copy_fun offset_fun =
+    let (set_len, get_len) = new_local env "len" in
+    let (set_blob, get_blob) = new_local env "blob" in
+    get_size_fun env ^^ set_len ^^
+
+    alloc env sort get_len ^^ set_blob ^^
+    get_blob ^^ payload_ptr_unskewed env ^^
+    offset_fun env ^^
+    get_len ^^
+    copy_fun env ^^
+
+    get_blob
+
+  (* Lexicographic blob comparison. Expects two blobs on the stack.
+     Either specialized to a specific comparison operator, and returns a boolean,
+     or implements the generic comparison, returning -1, 0 or 1 as Int64.
+     Lt/Gt/Neq are compiled as the negation of Ge/Le/Eq; Eq short-circuits
+     on unequal lengths. Bytes are loaded zero-extended (Pack8, ZX). *)
+  let rec compare env op =
+    (* return convention for the generic comparison function *)
+    let is_lt = compile_unboxed_const (-1L) in
+    let is_gt = compile_unboxed_const 1L in
+    let is_eq = compile_unboxed_const 0L in
+    let open Operator in
+    let name = match op with
+        | Some LtOp -> "Blob.compare_lt"
+        | Some LeOp -> "Blob.compare_le"
+        | Some GeOp -> "Blob.compare_ge"
+        | Some GtOp -> "Blob.compare_gt"
+        | Some EqOp -> "Blob.compare_eq"
+        | Some NeqOp -> "Blob.compare_neq"
+        | None -> "Blob.compare" in
+    Func.share_code2 Func.Always env name (("x", I64Type), ("y", I64Type)) [I64Type] (fun env get_x get_y ->
+      match op with
+        (* Some operators can be reduced to the negation of other operators *)
+        | Some LtOp -> get_x ^^ get_y ^^ compare env (Some GeOp) ^^ Bool.neg
+        | Some GtOp -> get_x ^^ get_y ^^ compare env (Some LeOp) ^^ Bool.neg
+        | Some NeqOp -> get_x ^^ get_y ^^ compare env (Some EqOp) ^^ Bool.neg
+        | _ ->
+      begin
+        let set_x = G.setter_for get_x in
+        let set_y = G.setter_for get_y in
+        get_x ^^ Tagged.load_forwarding_pointer env ^^ set_x ^^
+        get_y ^^ Tagged.load_forwarding_pointer env ^^ set_y ^^
+
+        let (set_len1, get_len1) = new_local env "len1" in
+        let (set_len2, get_len2) = new_local env "len2" in
+        let (set_len, get_len) = new_local env "len" in
+        let (set_a, get_a) = new_local env "a" in
+        let (set_b, get_b) = new_local env "b" in
+
+        get_x ^^ len env ^^ set_len1 ^^
+        get_y ^^ len env ^^ set_len2 ^^
+
+        (* Find minimum length *)
+        begin if op = Some EqOp then
+          (* Early exit for equality *)
+          get_len1 ^^ get_len2 ^^ compile_comparison I64Op.Eq ^^
+          E.if0 G.nop (Bool.lit false ^^ G.i Return) ^^
+
+          get_len1 ^^ set_len
+        else
+          get_len1 ^^ get_len2 ^^ compile_comparison I64Op.LeU ^^
+          E.if0
+            (get_len1 ^^ set_len)
+            (get_len2 ^^ set_len)
+        end ^^
+
+        (* We could do word-wise comparisons if we know that the trailing bytes
+           are zeroed *)
+        get_len ^^
+        from_0_to_n env (fun get_i ->
+          get_x ^^
+          payload_ptr_unskewed env ^^
+          get_i ^^
+          G.i (Binary (Wasm_exts.Values.I64 I64Op.Add)) ^^
+          G.i (Load {ty = I64Type; align = 0; offset = 0L; sz = Some Wasm_exts.Types.(Pack8, ZX)}) ^^
+          set_a ^^
+
+          get_y ^^
+          payload_ptr_unskewed env ^^
+          get_i ^^
+          G.i (Binary (Wasm_exts.Values.I64 I64Op.Add)) ^^
+          G.i (Load {ty = I64Type; align = 0; offset = 0L; sz = Some Wasm_exts.Types.(Pack8, ZX)}) ^^
+          set_b ^^
+
+          get_a ^^ get_b ^^ compile_comparison I64Op.Eq ^^
+          E.if0 G.nop (
+            (* first non-equal elements *)
+            begin match op with
+            | Some LeOp -> get_a ^^ get_b ^^ compile_comparison I64Op.LeU
+            | Some GeOp -> get_a ^^ get_b ^^ compile_comparison I64Op.GeU
+            | Some EqOp -> Bool.lit false
+            | None -> get_a ^^ get_b ^^ compile_comparison I64Op.LtU ^^
+                      E.if1 I64Type is_lt is_gt
+            | _ -> assert false
+            end ^^
+            G.i Return
+          )
+        ) ^^
+        (* Common prefix is same *)
+        match op with
+        | Some LeOp -> get_len1 ^^ get_len2 ^^ compile_comparison I64Op.LeU
+        | Some GeOp -> get_len1 ^^ get_len2 ^^ compile_comparison I64Op.GeU
+        | Some EqOp -> Bool.lit true (* NB: Different length handled above *)
+        | None ->
+            get_len1 ^^ get_len2 ^^ compile_comparison I64Op.LtU ^^
+            E.if1 I64Type is_lt (
+              get_len1 ^^ get_len2 ^^ compile_comparison I64Op.GtU ^^
+              E.if1 I64Type is_gt is_eq
+            )
+        | _ -> assert false
+      end
+  )
+
+  (* Blob iteration, implemented in the RTS. *)
+  let iter env =
+    E.call_import env "rts" "blob_iter"
+  let iter_done env =
+    E.call_import env "rts" "blob_iter_done"
+  let iter_next env =
+    E.call_import env "rts" "blob_iter_next" ^^
+    TaggedSmallWord.msb_adjust Type.Nat8
+
+  (* Allocate a throwaway blob of the given size and leave its payload
+     pointer; used as scratch space. *)
+  let dyn_alloc_scratch env =
+    let (set_len, get_len) = new_local env "len" in
+    set_len ^^
+    alloc env Tagged.B get_len ^^ payload_ptr_unskewed env
+
+end (* Blob *)
+
+module Object = struct
+  (* An object with a mutable field1 and immutable field 2 has the following
+     heap layout:
+
+     ┌──────┬─────┬──────────┬─────────┬─────────────┬───┐
+     │ obj header │ hash_ptr │ ind_ptr │ field2_data │ … │
+     └──────┴─────┴┬─────────┴┬────────┴─────────────┴───┘
+          ┌────────┘          │
+          │   ┌───────────────┘
+          │   ↓
+          │  ╶─┬────────┬─────────────┐
+          │    │ MutBox │ field1_data │
+          ↓    └────────┴─────────────┘
+          ┌─────────────┬─────────────┬─────────────┬───┐
+          │ blob header │ field1_hash │ field2_hash │ … │
+          └─────────────┴─────────────┴─────────────┴───┘
+
+     The object header includes the object tag (Object) and the forwarding pointer.
+     The size of the object (number of fields) can be derived from the hash blob via `hash_ptr`.
+
+     The field hashes reside in a blob inside the dynamic heap.
+     The hash blob needs to be tracked by the GC, but not the content of the hash blob.
+     This is because the hash values are plain numbers that would look like skewed pointers.
+     The hash_ptr is skewed.
+
+     The field2_data for immutable fields is a vanilla word.
+
+     The field1_data for mutable fields are pointers to a MutBox. This indirection
+     is a consequence of how we compile object literals with `await` instructions,
+     as these mutable fields need to be able to alias local mutable variables, e.g.
+     `{ public let f = 1; await async (); public let var v = 2}`.
+     Other use cases are object constructors with public and private mutable fields,
+     where the physical record only wraps the public fields.
+     Moreover, closures can selectively capture the individual fields instead of
+     the containing object.
+     Finally, classical Candid stabilization/destabilization also relies on the
+     indirection of mutable fields, to reserve and store alias information in those
+     locations.
+
+     We could alternatively switch to an allocate-first approach in the
+     await-translation of objects, and get rid of this indirection -- if it were
+     not for the implementing of sharing of mutable stable values.
+   *)
+
+  (* Header plus the hash_ptr word. *)
+  let header_size = Int64.add Tagged.header_size 1L
+
+  let hash_ptr_field = Int64.add Tagged.header_size 0L
+
+  module FieldEnv = Env.Make(String)
+
+  (* This is for non-recursive objects. *)
+  (* The instructions in the field already create the indirection if needed.
+     Builds a closure that allocates the object, stores the (shared) hash
+     blob, writes every field at its hash-sorted position, and returns the
+     object pointer after the allocation barrier. *)
+  let object_builder env (fs : (string * (E.t -> G.t)) list ) =
+    let name_pos_map =
+      fs |>
+        (* We could store only public fields in the object, but
+          then we need to allocate separate boxes for the non-public ones:
+          List.filter (fun (_, vis, f) -> vis.it = Public) |>
+        *)
+        List.map (fun (n,_) -> (E.hash env n, n)) |>
+        List.sort compare |>
+        List.mapi (fun i (_h,n) -> (n,Int64.of_int i)) |>
+        List.fold_left (fun m (n,i) -> FieldEnv.add n i m) FieldEnv.empty in
+
+      let sz = Int64.of_int (FieldEnv.cardinal name_pos_map) in
+
+      (* Create hash blob *)
+      let hashes = fs |>
+        List.map (fun (n,_) -> E.hash env n) |>
+        List.sort compare in
+      let hash_blob =
+        let hash_payload = StaticBytes.[ i64s hashes ] in
+        Blob.constant env Tagged.B (StaticBytes.as_bytes hash_payload)
+      in
+
+      (fun env ->
+        (* Allocate memory *)
+        (* the local index is unused here; bind it as [_ri] to avoid an
+           unused-variable warning *)
+        let (set_ri, get_ri, _ri) = new_local_ env I64Type "obj" in
+        Tagged.alloc env (Int64.add header_size sz) Tagged.Object ^^
+        set_ri ^^
+
+        (* Set hash_ptr *)
+        get_ri ^^
+        Tagged.materialize_shared_value env hash_blob ^^
+        Tagged.store_field env hash_ptr_field ^^
+
+        (* Write all the fields *)
+        let init_field (name, generate_value) : G.t =
+          (* Write the pointer to the indirection *)
+          get_ri ^^
+          generate_value env ^^
+          let i = FieldEnv.find name name_pos_map in
+          let offset = Int64.add header_size i in
+          Tagged.store_field env offset
+        in
+        G.concat_map init_field fs ^^
+
+        (* Return the pointer to the object *)
+        get_ri ^^
+        Tagged.allocation_barrier env
+      )
+
+  (* Shared (deduplicated) object constant built from shared field values. *)
+  let constant env (fs : (string * E.shared_value) list) =
+    let materialize_fields = List.map (fun (name, value) -> (name, fun env -> Tagged.materialize_shared_value env value)) fs in
+    let allocation = object_builder env materialize_fields in
+    Tagged.shared_object env allocation
+
+  (* This is for non-recursive objects, i.e. ObjNewE *)
+  (* The instructions in the field already create the indirection if needed *)
+  let lit_raw env (fs : (string * (unit -> G.t)) list ) =
+    (* the per-field generators ignore the inner environment; bind it as
+       [_env] to avoid an unused-variable warning *)
+    let materialize_fields = List.map (fun (name, generate_value) -> (name, (fun _env -> generate_value ()))) fs in
+    let allocation = object_builder env materialize_fields in
+    allocation env
+
+  (* Reflection used by orthogonal persistence:
+     Check whether an (actor) object contains a specific field *)
+  let contains_field env field =
+    compile_unboxed_const (E.hash env field) ^^
+    E.call_import env "rts" "contains_field" ^^
+    Bool.from_rts_int32
+ 
+  (* Returns a pointer to the object field (without following the field indirection).
+     Traps (Unreachable) only if the hash is absent, which the type system rules out. *)
+  let idx_hash_raw env low_bound =
+    let name = Printf.sprintf "obj_idx<%d>" low_bound  in
+    Func.share_code2 Func.Always env name (("x", I64Type), ("hash", I64Type)) [I64Type] (fun env get_x get_hash ->
+      let set_x = G.setter_for get_x in
+      let set_h_ptr, get_h_ptr = new_local env "h_ptr" in
+
+      get_x ^^ Tagged.load_forwarding_pointer env ^^ set_x ^^
+
+      get_x ^^ Tagged.load_field env hash_ptr_field ^^
+      Blob.payload_ptr_unskewed env ^^
+
+      (* Linearly scan through the fields (binary search can come later) *)
+      (* unskew h_ptr and advance both to low bound *)
+      compile_add_const Int64.(mul Heap.word_size (of_int low_bound)) ^^
+      set_h_ptr ^^
+      get_x ^^
+      compile_add_const Int64.(mul Heap.word_size (add header_size (of_int low_bound))) ^^
+      set_x ^^
+      G.loop0 (
+          get_h_ptr ^^ load_unskewed_ptr ^^
+          get_hash ^^ compile_comparison I64Op.Eq ^^
+          E.if0
+            (get_x ^^ G.i Return)
+            (get_h_ptr ^^ compile_add_const Heap.word_size ^^ set_h_ptr ^^
+            get_x ^^ compile_add_const Heap.word_size ^^ set_x ^^
+            G.i (Br (nr 1l)))
+        ) ^^
+      G.i Unreachable
+    )
+
+  (* Returns a pointer to the object field (possibly following the indirection).
+     When [indirect], dereferences the MutBox and points at its value slot. *)
+  let idx_hash env low_bound indirect =
+    if indirect
+    then
+      let name = Printf.sprintf "obj_idx_ind<%d>" low_bound in
+      Func.share_code2 Func.Never env name (("x", I64Type), ("hash", I64Type)) [I64Type] (fun env get_x get_hash ->
+      get_x ^^ get_hash ^^
+      idx_hash_raw env low_bound ^^
+      load_ptr ^^ Tagged.load_forwarding_pointer env ^^
+      compile_add_const (Int64.mul MutBox.field Heap.word_size)
+    )
+    else idx_hash_raw env low_bound
+
+  let field_type env obj_type s =
+    let _, fields = Type.as_obj_sub [s] obj_type in
+    Type.lookup_val_field s fields
+
+  (* Determines whether the field is mutable (and thus needs an indirection) *)
+  let is_mut_field env obj_type s =
+    let _, fields = Type.as_obj_sub [s] obj_type in
+    Type.is_mut (Type.lookup_val_field s fields)
+
+  (* Computes a lower bound for the positional index of a field in an object *)
+  let field_lower_bound env obj_type s =
+    let open Type in
+    let _, fields = as_obj_sub [s] obj_type in
+    List.iter (function {typ = Typ _; _} -> assert false | _ -> ()) fields;
+    let sorted_by_hash =
+      List.sort
+        (fun (h1, _) (h2, _) -> compare_uint64 h1 h2)
+        (List.map (fun f -> E.hash env f.lab, f) fields) in
+    match Lib.List.index_of s (List.map (fun (_, {lab; _}) -> lab) sorted_by_hash) with
+    | Some i -> i
+    | _ -> assert false
+
+  (* Returns a pointer to the object field (without following the indirection) *)
+  let idx_raw env f =
+    compile_unboxed_const (E.hash env f) ^^
+    idx_hash_raw env 0
+
+  (* Returns a pointer to the object field (possibly following the indirection) *)
+  let idx env obj_type f =
+    compile_unboxed_const (E.hash env f) ^^
+    idx_hash env (field_lower_bound env obj_type f) (is_mut_field env obj_type f) (* deref the MutBox iff the field is mutable *)
+
+  (* load the raw slot contents: the value itself, or the MutBox for a mutable field *)
+  let load_idx_raw env f =
+    idx_raw env f ^^
+    load_ptr
+
+  (* load the actual value (dereferencing the mutbox for mutable fields) *)
+  let load_idx env obj_type f =
+    idx env obj_type f ^^
+    load_ptr
+ 
+end (* Object *) 
+
+module Region = struct
+  (*
+    See rts/motoko-rts/src/region.rs
+   *)
+
+  (* Object layout:
+
+     ┌─────┬──────────┬──────────────────┬─────────────────┐
+     │ tag │ id_field │ page_count_field │ vec_pages_field │
+     └─────┴──────────┴──────────────────┴─────────────────┘
+            (unboxed, low 16 bits, rest 0-initialized padding)
+                        unboxed u32
+                                          Blob
+  *)
+
+  let alloc_region env = (* delegates to RTS `alloc_region` *)
+    E.call_import env "rts" "alloc_region"
+
+  let init_region env = (* delegates to RTS `init_region` *)
+    E.call_import env "rts" "init_region"
+
+  (* field accessors (implemented in the RTS) *)
+  (* NB: all these opns must resolve forwarding pointers here or in RTS *)
+  let id env =
+    E.call_import env "rts" "region_id"
+
+  let page_count env =
+    E.call_import env "rts" "region_page_count"
+
+  let vec_pages env =
+    E.call_import env "rts" "region_vec_pages"
+
+  let new_ env =
+    E.require_stable_memory env; (* region operations require stable-memory support *)
+    E.call_import env "rts" "region_new"
+
+  let size env =
+    E.require_stable_memory env;
+    E.call_import env "rts" "region_size"
+
+  let grow env =
+    E.require_stable_memory env;
+    E.call_import env "rts" "region_grow"
+
+  let load_blob env =
+    E.require_stable_memory env;
+    E.call_import env "rts" "region_load_blob"
+  let store_blob env =
+    E.require_stable_memory env;
+    E.call_import env "rts" "region_store_blob"
+
+  let load_word8 env =
+    E.require_stable_memory env;
+    E.call_import env "rts" "region_load_word8"
+  let store_word8 env =
+    E.require_stable_memory env;
+    E.call_import env "rts" "region_store_word8"
+
+  let load_word16 env =
+    E.require_stable_memory env;
+    E.call_import env "rts" "region_load_word16"
+  let store_word16 env =
+    E.require_stable_memory env;
+    E.call_import env "rts" "region_store_word16"
+
+  let load_word32 env =
+    E.require_stable_memory env;
+    E.call_import env "rts" "region_load_word32"
+  let store_word32 env =
+    E.require_stable_memory env;
+    E.call_import env "rts" "region_store_word32"
+
+  let load_word64 env =
+    E.require_stable_memory env;
+    E.call_import env "rts" "region_load_word64"
+  let store_word64 env =
+    E.require_stable_memory env;
+    E.call_import env "rts" "region_store_word64"
+
+  let load_float64 env =
+    E.require_stable_memory env;
+    E.call_import env "rts" "region_load_float64"
+  let store_float64 env =
+    E.require_stable_memory env;
+    E.call_import env "rts" "region_store_float64"
+
+end
+
+module Text = struct
+  (*
+  Most of the heavy lifting around text values is in rts/motoko-rts/src/text.rs
+  *)
+
+  (* The layout of a concatenation node is
+
+     ┌──────┬─────┬─────────┬───────┬───────┐
+     │ obj header │ n_bytes │ text1 │ text2 │
+     └──────┴─────┴─────────┴───────┴───────┘
+
+    The object header includes the object tag (TAG_CONCAT defined in rts/types.rs) and the forwarding pointer
+
+    This is internal to rts/motoko-rts/src/text.rs, with the exception of GC-related code.
+  *)
+
+  let of_ptr_size env =
+    E.call_import env "rts" "text_of_ptr_size"
+  let concat env =
+    E.call_import env "rts" "text_concat"
+  let size env =
+    E.call_import env "rts" "text_size"
+  let to_buf env =
+    E.call_import env "rts" "text_to_buf"
+  let len_nat env =
+    Func.share_code1 Func.Never env "text_len" ("text", I64Type) [I64Type] (fun env get ->
+      get ^^
+      E.call_import env "rts" "text_len" ^^
+      BigNum.from_word64 env (* box the character count as a Nat *)
+    )
+  let prim_showChar env =
+    TaggedSmallWord.lsb_adjust_codepoint env ^^
+    G.i (Convert (Wasm_exts.Values.I32 I32Op.WrapI64)) ^^ (* RTS takes the codepoint as 32 bit *)
+    E.call_import env "rts" "text_singleton"
+  let to_blob env = E.call_import env "rts" "blob_of_text"
+
+  let lowercase env = E.call_import env "rts" "text_lowercase"
+  let uppercase env = E.call_import env "rts" "text_uppercase"
+
+  let of_blob env =
+    let (set_blob, get_blob) = new_local env "blob" in
+    set_blob ^^
+    get_blob ^^ Blob.as_ptr_len env ^^
+    E.call_import env "rts" "utf8_valid" ^^ (* validate before re-tagging as text *)
+    Bool.from_rts_int32 ^^
+    E.if1 I64Type
+      (get_blob ^^ Blob.copy env Tagged.B Tagged.T ^^ (* copy, re-tagged B -> T *)
+       set_blob ^^
+       Opt.inject_simple env get_blob)
+      (Opt.null_lit env)
+
+  let iter env =
+    E.call_import env "rts" "text_iter"
+  let iter_done env =
+    E.call_import env "rts" "text_iter_done"
+  let iter_next env =
+    E.call_import env "rts" "text_iter_next" ^^ Bool.from_rts_int32 ^^
+    TaggedSmallWord.msb_adjust_codepoint
+
+  let compare env op =
+    let open Operator in
+    let name = match op with
+        | LtOp -> "Text.compare_lt"
+        | LeOp -> "Text.compare_le"
+        | GeOp -> "Text.compare_ge"
+        | GtOp -> "Text.compare_gt"
+        | EqOp -> "Text.compare_eq"
+        | NeqOp -> assert false in (* NeqOp is not expected here *)
+    Func.share_code2 Func.Never env name (("x", I64Type), ("y", I64Type)) [I64Type] (fun env get_x get_y ->
+      get_x ^^ Tagged.load_forwarding_pointer env ^^
+      get_y ^^ Tagged.load_forwarding_pointer env ^^
+      E.call_import env "rts" "text_compare" ^^
+      compile_unboxed_const 0L ^^ (* compare the RTS ordering result against 0 *)
+      match op with
+        | LtOp -> compile_comparison I64Op.LtS
+        | LeOp -> compile_comparison I64Op.LeS
+        | GtOp -> compile_comparison I64Op.GtS
+        | GeOp -> compile_comparison I64Op.GeS
+        | EqOp -> compile_comparison I64Op.Eq
+        | NeqOp -> assert false
+    )
+
+
+end (* Text *)
+
+module Arr = struct
+  (* Object layout:
+
+     ┌──────┬─────┬──────────┬────────┬───┐
+     │ obj header │ n_fields │ field1 │ … │
+     └──────┴─────┴──────────┴────────┴───┘
+
+     The object header includes the object tag (Array) and the forwarding pointer.
+
+     No difference between mutable and immutable arrays.
+  *)
+
+  (* NB max_array_size must agree with limit 2^61 imposed by RTS alloc_array() *)
+  let max_array_size = Int64.shift_left 1L 61 (* inclusive *)
+
+  let header_size = Int64.add Tagged.header_size 1L (* object header plus the length field *)
+  let element_size = 8L (* one 64-bit word per element *)
+  let len_field = Int64.add Tagged.header_size 0L
+
+  let len env =
+    Tagged.load_forwarding_pointer env ^^
+    Tagged.load_field env len_field
+
+  (* Static array access. No checking *)
+  let load_field env n =
+    Tagged.load_forwarding_pointer env ^^
+    Tagged.load_field env Int64.(add n header_size)
+
+  (* Dynamic array access. Returns the address (not the value) of the field.
+     Does no bounds checking *)
+  let unsafe_idx env =
+    Func.share_code2 Func.Never env "Array.unsafe_idx" (("array", I64Type), ("idx", I64Type)) [I64Type] (fun env get_array get_idx ->
+      get_idx ^^
+      compile_add_const header_size ^^
+      compile_mul_const element_size ^^
+      get_array ^^
+      Tagged.load_forwarding_pointer env ^^
+      G.i (Binary (Wasm_exts.Values.I64 I64Op.Add))
+    )
+
+  (* Dynamic array access. Returns the address (not the value) of the field.
+     Does bounds checking *)
+  let idx env =
+    Func.share_code2 Func.Never env "Array.idx" (("array", I64Type), ("idx", I64Type)) [I64Type] (fun env get_array get_idx ->
+      (* No need to check the lower bound, we interpret idx as unsigned *)
+      (* Check the upper bound *)
+      get_idx ^^
+      get_array ^^ len env ^^
+      compile_comparison I64Op.LtU ^^
+      E.else_trap_with env "Array index out of bounds" ^^
+
+      get_idx ^^
+      compile_add_const header_size ^^
+      compile_mul_const element_size ^^
+      get_array ^^
+      Tagged.load_forwarding_pointer env ^^
+      G.i (Binary (Wasm_exts.Values.I64 I64Op.Add))
+    )
+
+  (* As above, but taking a bigint (Nat), and reporting overflow as out of bounds *)
+  let idx_bigint env =
+    Func.share_code2 Func.Never env "Array.idx_bigint" (("array", I64Type), ("idx", I64Type)) [I64Type] (fun env get_array get_idx ->
+      get_array ^^
+      get_idx ^^
+      BigNum.to_word64_with env (Blob.lit env Tagged.T "Array index out of bounds") ^^ (* traps with this message on conversion overflow *)
+      idx env
+  )
+
+  let element_type env typ = match Type.promote typ with
+     | Type.Array element_type -> element_type
+     | _ -> assert false
+
+  (* Compile an array literal. *)
+  let lit env sort element_instructions =
+    Tagged.obj env Tagged.(Array sort)
+     ([ compile_unboxed_const (Wasm.I64.of_int_u (List.length element_instructions))
+      ] @ element_instructions)
+
+  let constant env sort elements =
+    Tagged.shared_object env (fun env ->
+      let materialized_elements = List.map (fun element -> Tagged.materialize_shared_value env element) elements in
+      lit env sort materialized_elements
+    )
+
+  (* Does not initialize the fields! *)
+  (* Note: Post allocation barrier must be applied after initialization *)
+  let alloc env array_sort len =
+    compile_unboxed_const Tagged.(int_of_tag (Array array_sort)) ^^
+    len ^^
+    E.call_import env "rts" "alloc_array"
+
+  let iterate env get_array body =
+    let (set_boundary, get_boundary) = new_local env "boundary" in
+    let (set_pointer, get_pointer) = new_local env "pointer" in
+    let set_array = G.setter_for get_array in
+
+    get_array ^^ Tagged.load_forwarding_pointer env ^^ set_array ^^
+
+    (* Initial element pointer, skewed *)
+    compile_unboxed_const header_size ^^
+    compile_mul_const element_size ^^
+    get_array ^^
+    G.i (Binary (Wasm_exts.Values.I64 I64Op.Add)) ^^
+    set_pointer ^^
+
+    (* Upper pointer boundary, skewed *)
+    get_array ^^
+    Tagged.load_field env len_field ^^
+    compile_mul_const element_size ^^
+    get_pointer ^^
+    G.i (Binary (Wasm_exts.Values.I64 I64Op.Add)) ^^
+    set_boundary ^^
+
+    (* Loop through all elements *)
+    compile_while env
+    ( get_pointer ^^
+      get_boundary ^^
+      compile_comparison I64Op.LtU
+    ) (
+      body get_pointer ^^
+
+      (* Next element pointer, skewed *)
+      get_pointer ^^
+      compile_add_const element_size ^^
+      set_pointer
+    )
+
+  (* The primitive operations *)
+  (* No need to wrap them in RTS functions: They occur only once, in the prelude. *)
+  let init env =
+    let (set_x, get_x) = new_local env "x" in
+    let (set_r, get_r) = new_local env "r" in
+    set_x ^^
+
+    (* Allocate *)
+    BigNum.to_word64 env ^^
+    set_r ^^
+    alloc env Tagged.M get_r ^^ (* mutable array *)
+    set_r ^^
+
+    (* Write elements *)
+    iterate env get_r (fun get_pointer ->
+      get_pointer ^^
+      get_x ^^
+      store_ptr
+    ) ^^
+
+    get_r ^^
+    Tagged.allocation_barrier env
+
+
+  let tabulate env =
+    let (set_f, get_f) = new_local env "f" in
+    let (set_r, get_r) = new_local env "r" in
+    let (set_i, get_i) = new_local env "i" in
+    set_f ^^
+
+    (* Allocate *)
+    BigNum.to_word64 env ^^
+    set_r ^^
+    alloc env Tagged.I get_r ^^ (* immutable array *)
+    set_r ^^
+
+    (* Initial index *)
+    compile_unboxed_const 0L ^^
+    set_i ^^
+
+    (* Write elements *)
+    iterate env get_r (fun get_pointer ->
+      get_pointer ^^
+      (* The closure *)
+      get_f ^^
+      Closure.prepare_closure_call env ^^
+      (* The arg *)
+      get_i ^^
+      BigNum.from_word64 env ^^
+      (* The closure again *)
+      get_f ^^
+      (* Call *)
+      Closure.call_closure env 1 1 ^^
+      store_ptr ^^
+
+      (* Increment index *)
+      get_i ^^
+      compile_add_const 1L ^^
+      set_i
+    ) ^^
+    get_r ^^
+    Tagged.allocation_barrier env
+
+  let ofBlob env sort =
+    let name = Tagged.(match sort with I -> "Arr.ofBlob" | M -> "Arr.ofBlobMut" | _ -> assert false) in
+    Func.share_code1 Func.Always env name ("blob", I64Type) [I64Type] (fun env get_blob ->
+      let (set_len, get_len) = new_local env "len" in
+      let (set_r, get_r) = new_local env "r" in
+
+      get_blob ^^ Blob.len env ^^ set_len ^^
+
+      alloc env sort get_len ^^ set_r ^^
+
+      get_len ^^ from_0_to_n env (fun get_i ->
+        get_r ^^ get_i ^^ unsafe_idx env ^^
+        get_blob ^^ Blob.payload_ptr_unskewed env ^^
+        get_i ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.Add)) ^^
+        G.i (Load {ty = I64Type; align = 0; offset = 0L; sz = Some Wasm_exts.Types.(Pack8, ZX)}) ^^ (* zero-extended byte load *)
+        TaggedSmallWord.msb_adjust Type.Nat8 ^^
+        TaggedSmallWord.tag env Type.Nat8 ^^
+        store_ptr
+      ) ^^
+      get_r ^^
+      Tagged.allocation_barrier env
+    )
+
+  let toBlob env =
+    Func.share_code1 Func.Always env "Arr.toBlob" ("array", I64Type) [I64Type] (fun env get_a ->
+      let (set_len, get_len) = new_local env "len" in
+      let (set_r, get_r) = new_local env "r" in
+
+      get_a ^^ len env ^^ set_len ^^
+
+      Blob.alloc env Tagged.B get_len ^^ set_r ^^
+
+      get_len ^^ from_0_to_n env (fun get_i ->
+        get_r ^^ Blob.payload_ptr_unskewed env ^^
+        get_i ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.Add)) ^^
+        get_a ^^ get_i ^^ unsafe_idx env ^^
+        load_ptr ^^
+        TaggedSmallWord.lsb_adjust Type.Nat8 ^^
+        G.i (Store {ty = I64Type; align = 0; offset = 0L; sz = Some Wasm_exts.Types.Pack8}) (* store low byte only *)
+      ) ^^
+
+      get_r
+    )
+
+end (* Array *)
+
+module Tuple = struct
+  (* Tuples use the same object representation (and same tag) as arrays.
+     Even though we know the size statically, we still need the size
+     information for the GC.
+
+     One could introduce tags for small tuples, to save one word.
+  *)
+
+  (* We represent the boxed empty tuple as the unboxed scalar 0, i.e. simply as
+     number (but really anything is fine, we never look at this) *)
+  let unit_vanilla_lit env = TaggingScheme.unit_tag  (* all tag, trivial payload *)
+  let compile_unit env = compile_unboxed_const (unit_vanilla_lit ()) (* unit_vanilla_lit ignores its argument *)
+
+  (* Expects on the stack the pointer to the array. *)
+  let load_n env n =
+    Tagged.load_forwarding_pointer env ^^
+    Tagged.(sanity_check_tag __LINE__ env (Array T)) ^^
+    Tagged.load_field env (Int64.add Arr.header_size n)
+
+  (* Takes n elements of the stack and produces an argument tuple *)
+  let from_stack env n =
+    if n = 0 then compile_unit env (* empty tuple: the unboxed unit scalar *)
+    else
+      let name = Printf.sprintf "to_%i_tuple" n in
+      let args = Lib.List.table n (fun i -> Printf.sprintf "arg%i" i, I64Type) in
+      Func.share_code Func.Never env name args [I64Type] (fun env getters ->
+        Arr.lit env Tagged.T (Lib.List.table n (fun i -> List.nth getters i))
+      )
+
+  (* Takes an argument tuple and puts the elements on the stack: *)
+  let to_stack env n =
+    if n = 0 then G.i Drop else (* nothing to unpack: drop the unit value *)
+    begin
+      let name = Printf.sprintf "from_%i_tuple" n in
+      let retty = Lib.List.make n I64Type in
+      Func.share_code1 Func.Never env name ("tup", I64Type) retty (fun env get_tup ->
+        G.table n (fun i -> get_tup ^^ load_n env (Int64.of_int i))
+      )
+    end
+
+end (* Tuple *)
+
+module Lifecycle = struct
+  (*
+  This module models the life cycle of a canister as a very simple state machine,
+  keeps track of the current state of the canister, and traps noisily if an
+  unexpected transition happens. Such a transition would either be a bug in the
+  underlying system, or in our RTS.
+  *)
+
+  type state =
+    | PreInit
+  (* We do not use the (start) function when compiling canisters, so skip
+     these two:
+    | InStart
+    | Started (* (start) has run *)
+  *)
+    | InInit (* canister_init *)
+    | Idle (* basic steady state *)
+    | InUpdate
+    | InQuery
+    | PostQuery (* an invalid state *)
+    | InPreUpgrade
+    | PostPreUpgrade (* an invalid state *)
+    | InPostUpgrade
+    | InComposite
+    | InStabilization (* stabilization before upgrade *)
+    | InDestabilization (* destabilization after upgrade *)
+
+  let string_of_state state = match state with
+    | PreInit -> "PreInit"
+    | InInit -> "InInit"
+    | Idle -> "Idle"
+    | InUpdate -> "InUpdate"
+    | InQuery -> "InQuery"
+    | PostQuery -> "PostQuery"
+    | InPreUpgrade -> "InPreUpgrade"
+    | PostPreUpgrade -> "PostPreUpgrade"
+    | InPostUpgrade -> "InPostUpgrade"
+    | InComposite -> "InComposite"
+    | InStabilization -> "InStabilization"
+    | InDestabilization -> "InDestabilization"
+
+  let int_of_state = function
+    | PreInit -> 0L (* Automatically null *)
+    (*
+    | InStart -> 1L
+    | Started -> 2L
+    *)
+    | InInit -> 3L
+    | Idle -> 4L
+    | InUpdate -> 5L
+    | InQuery -> 6L
+    | PostQuery -> 7L
+    | InPreUpgrade -> 8L
+    | PostPreUpgrade -> 9L
+    | InPostUpgrade -> 10L
+    | InComposite -> 11L
+    | InStabilization -> 12L
+    | InDestabilization -> 13L
+
+  let ptr () = Stack.end_ () (* the state word lives in the first word after the RTS stack *)
+  let end_ () = Int64.add (Stack.end_ ()) Heap.word_size
+
+  (* Which states may come before this *)
+  let pre_states = function
+    | PreInit -> []
+    (*
+    | InStart -> [PreInit]
+    | Started -> [InStart]
+    *)
+    | InInit -> [PreInit]
+    | Idle -> [InInit; InUpdate; InPostUpgrade; InComposite; InDestabilization]
+    | InUpdate -> [Idle]
+    | InQuery -> [Idle]
+    | PostQuery -> [InQuery]
+    | InPreUpgrade -> [Idle; InStabilization]
+    | PostPreUpgrade -> [InPreUpgrade]
+    | InPostUpgrade -> [InInit; InDestabilization]
+    | InComposite -> [Idle; InComposite]
+    | InStabilization -> [Idle; InStabilization]
+    | InDestabilization -> [InInit]
+
+  let get env =
+    compile_unboxed_const (ptr ()) ^^
+    load_unskewed_ptr
+
+  let set env new_state =
+    compile_unboxed_const (ptr ()) ^^
+    compile_unboxed_const (int_of_state new_state) ^^
+    store_unskewed_ptr
+
+  let during_explicit_upgrade env = (* state is InStabilization or InDestabilization *)
+    get env ^^
+    compile_eq_const (int_of_state InStabilization) ^^
+    get env ^^
+    compile_eq_const (int_of_state InDestabilization) ^^
+    G.i (Binary (Wasm_exts.Values.I64 I64Op.Or))
+
+  let trans env new_state =
+    let name = "trans_state" ^ Int64.to_string (int_of_state new_state) in
+    Func.share_code0 Func.Always env name [] (fun env ->
+      G.block0 ( (* exited via Br 1l below as soon as a valid pre-state matches *)
+        let rec go = function
+        | [] -> (* no pre-state matched: trap *)
+          during_explicit_upgrade env ^^
+          E.if0
+            (E.trap_with env "Messages are blocked during stabilization")
+            (E.trap_with env
+              ("internal error: unexpected state entering " ^ string_of_state new_state))
+        | (s::ss) ->
+          get env ^^ compile_eq_const (int_of_state s) ^^
+          E.if0 (G.i (Br (nr 1l))) G.nop ^^
+          go ss
+        in go (pre_states new_state)
+        ) ^^
+      set env new_state
+    )
+
+  let is_in env state =
+    get env ^^
+    compile_eq_const (int_of_state state)
+
+end (* Lifecycle *)
+
+
+module IC = struct
+
+  (* IC-specific stuff: System imports, databufs etc. *)
+
+  let register_globals env =
+    (* result of last ic0.call_perform  *)
+    E.add_global64 env "__call_perform_status" Mutable 0L;
+    E.add_global64 env "__call_perform_message" Mutable 0L;
+    E.add_global64 env "__run_post_upgrade" Mutable 0L
+    (* NB: __call_perform_message is not a root so text contents *must* be static *)
+
+  let get_call_perform_status env =
+    G.i (GlobalGet (nr (E.get_global env "__call_perform_status")))
+  let set_call_perform_status env =
+    G.i (GlobalSet (nr (E.get_global env "__call_perform_status")))
+  let get_call_perform_message env =
+    G.i (GlobalGet (nr (E.get_global env "__call_perform_message")))
+  let set_call_perform_message env =
+    G.i (GlobalSet (nr (E.get_global env "__call_perform_message")))
+  let get_run_post_upgrade env =
+    G.i (GlobalGet (nr (E.get_global env "__run_post_upgrade")))
+  let set_run_post_upgrade env =
+    G.i (GlobalSet (nr (E.get_global env "__run_post_upgrade")))
+
+  let init_globals env =
+    Blob.lit env Tagged.T "" ^^
+    set_call_perform_message env
+
+  let i64s n = Lib.List.make n I64Type
+
+  let import_ic0 env =
+      E.add_func_import env "ic0" "accept_message" [] [];
+      E.add_func_import env "ic0" "call_data_append" (i64s 2) [];
+      E.add_func_import env "ic0" "call_cycles_add128" (i64s 2) [];
+      E.add_func_import env "ic0" "call_new" (i64s 8) [];
+      E.add_func_import env "ic0" "call_perform" [] [I32Type];
+      E.add_func_import env "ic0" "call_on_cleanup" (i64s 2) [];
+      E.add_func_import env "ic0" "canister_cycle_balance128" [I64Type] [];
+      E.add_func_import env "ic0" "canister_self_copy" (i64s 3) [];
+      E.add_func_import env "ic0" "canister_self_size" [] [I64Type];
+      E.add_func_import env "ic0" "canister_status" [] [I32Type];
+      E.add_func_import env "ic0" "canister_version" [] [I64Type];
+      E.add_func_import env "ic0" "is_controller" (i64s 2) [I32Type];
+      E.add_func_import env "ic0" "debug_print" (i64s 2) [];
+      E.add_func_import env "ic0" "msg_arg_data_copy" (i64s 3) [];
+      E.add_func_import env "ic0" "msg_arg_data_size" [] [I64Type];
+      E.add_func_import env "ic0" "msg_caller_copy" (i64s 3) [];
+      E.add_func_import env "ic0" "msg_caller_size" [] [I64Type];
+      E.add_func_import env "ic0" "msg_cycles_available128" [I64Type] [];
+      E.add_func_import env "ic0" "msg_cycles_refunded128" [I64Type] [];
+      E.add_func_import env "ic0" "msg_cycles_accept128" (i64s 3) [];
+      E.add_func_import env "ic0" "certified_data_set" (i64s 2) [];
+      E.add_func_import env "ic0" "data_certificate_present" [] [I32Type];
+      E.add_func_import env "ic0" "data_certificate_size" [] [I64Type];
+      E.add_func_import env "ic0" "data_certificate_copy" (i64s 3) [];
+      E.add_func_import env "ic0" "msg_method_name_size" [] [I64Type];
+      E.add_func_import env "ic0" "msg_method_name_copy" (i64s 3) [];
+      E.add_func_import env "ic0" "msg_reject_code" [] [I32Type];
+      E.add_func_import env "ic0" "msg_reject_msg_size" [] [I64Type];
+      E.add_func_import env "ic0" "msg_reject_msg_copy" (i64s 3) [];
+      E.add_func_import env "ic0" "msg_reject" (i64s 2) [];
+      E.add_func_import env "ic0" "msg_reply_data_append" (i64s 2) [];
+      E.add_func_import env "ic0" "msg_reply" [] [];
+      E.add_func_import env "ic0" "performance_counter" [I32Type] [I64Type];
+      E.add_func_import env "ic0" "trap" (i64s 2) [];
+      E.add_func_import env "ic0" "stable64_write" (i64s 3) [];
+      E.add_func_import env "ic0" "stable64_read" (i64s 3) [];
+      E.add_func_import env "ic0" "stable64_size" [] [I64Type];
+      E.add_func_import env "ic0" "stable64_grow" [I64Type] [I64Type];
+      E.add_func_import env "ic0" "time" [] [I64Type];
+      if !Flags.global_timer then
+        E.add_func_import env "ic0" "global_timer_set" [I64Type] [I64Type];
+      ()
+
+  let system_imports env =
+    match E.mode env with
+    | Flags.ICMode ->
+      import_ic0 env
+    | Flags.RefMode  ->
+      import_ic0 env
+    | Flags.WASIMode ->
+      (* Wasi function is still 32-bit based *)
+      E.add_func_import env "wasi_snapshot_preview1" "fd_write" [I32Type; I32Type; I32Type; I32Type] [I32Type];
+    | Flags.WasmMode -> ()
+
+  let system_call env funcname = E.call_import env "ic0" funcname
+
+  let register env =
+      let min env first second = 
+        first ^^
+        second ^^
+        compile_comparison I64Op.LtU ^^
+        E.if1 I64Type (first) (second) in
+
+      Func.define_built_in env "print_ptr" [("ptr", I64Type); ("len", I64Type)] [] (fun env ->
+        match E.mode env with
+        | Flags.WasmMode -> G.i Nop
+        | Flags.ICMode | Flags.RefMode ->
+          G.i (LocalGet (nr 0l)) ^^
+          G.i (LocalGet (nr 1l)) ^^
+          system_call env "debug_print"
+        | Flags.WASIMode -> begin
+          (* Since the wasmtime `fd_write` function still only supports 32-bit pointers in 64-bit mode, 
+             we use a static buffer for the text output that resides in the 32-bit space.
+             This buffer is limited to 512 bytes and is managed in the RTS, see `buffer_in_32_bit_range()`. *)
+          let get_ptr = G.i (LocalGet (nr 0l)) in
+          let get_len = G.i (LocalGet (nr 1l)) in
+
+          Stack.with_words env "io_vec" 6L (fun get_iovec_ptr ->
+            let buffer_length = 512 in
+            let buffer_ptr = E.call_import env "rts" "buffer_in_32_bit_range" in
+
+            (* Truncate the text if it does not fit into the buffer **)
+            min env (compile_unboxed_const (Int64.of_int buffer_length)) get_len ^^
+            G.setter_for get_len ^^
+
+            (* Copy the text to the static buffer in 32-bit space *)
+            buffer_ptr ^^
+            get_ptr ^^
+            get_len ^^
+            Heap.memcpy env ^^
+
+            (* We use the iovec functionality to append a newline *)
+            get_iovec_ptr ^^
+            narrow_to_32 env buffer_ptr ^^ (* This is safe because the buffer resides in 32-bit space *)
+            G.i (Store {ty = I32Type; align = 2; offset = 0L; sz = None}) ^^
+
+            get_iovec_ptr ^^
+            narrow_to_32 env get_len ^^
+            G.i (Store {ty = I32Type; align = 2; offset = 4L; sz = None}) ^^
+
+            get_iovec_ptr ^^
+            narrow_to_32 env get_iovec_ptr ^^ (* The stack pointer should always be in the 32-bit space *)
+            compile_add32_const 16l ^^
+            G.i (Store {ty = I32Type; align = 2; offset = 8L; sz = None}) ^^
+
+            get_iovec_ptr ^^
+            compile_const_32 1l ^^
+            G.i (Store {ty = I32Type; align = 2; offset = 12L; sz = None}) ^^
+
+            get_iovec_ptr ^^
+            compile_const_32 (Int32.of_int (Char.code '\n')) ^^
+            G.i (Store {ty = I32Type; align = 0; offset = 16L; sz = Some Wasm_exts.Types.Pack8}) ^^
+
+            (* Call fd_write twice to work around
+               https://github.com/bytecodealliance/wasmtime/issues/629
+            *)
+
+            compile_const_32 1l (* stdout *) ^^
+            narrow_to_32 env get_iovec_ptr ^^
+            compile_const_32 1l (* one string segment (2 doesn't work) *) ^^
+            narrow_to_32 env get_iovec_ptr ^^
+            compile_add32_const 20l ^^ (* out for bytes written, we ignore that *)
+            E.call_import env "wasi_snapshot_preview1" "fd_write" ^^
+            G.i Drop ^^
+
+            compile_const_32 1l (* stdout *) ^^
+            narrow_to_32 env get_iovec_ptr ^^
+            compile_add32_const 8l ^^
+            compile_const_32 1l (* one string segment *) ^^
+            narrow_to_32 env get_iovec_ptr ^^
+            compile_add32_const 20l ^^ (* out for bytes written, we ignore that *)
+            E.call_import env "wasi_snapshot_preview1" "fd_write" ^^
+            G.i Drop)
+          end);
+
+      E.add_export env (nr {
+        name = Lib.Utf8.decode "print_ptr";
+        edesc = nr (FuncExport (nr (E.built_in env "print_ptr")))
+      })
+
+
+  let ic_system_call call env =
+    match E.mode env with
+    | Flags.(ICMode | RefMode) ->
+      system_call env call
+    | _ ->
+      E.trap_with env Printf.(sprintf "cannot get %s when running locally" call)
+
+  let performance_counter env = 
+    G.i (Convert (Wasm_exts.Values.I32 I32Op.WrapI64)) ^^
+    ic_system_call "performance_counter" env
+
+  let is_controller env =
+    ic_system_call "is_controller" env ^^ 
+    G.i (Convert (Wasm_exts.Values.I64 I64Op.ExtendUI32))
+
+  let canister_version env = ic_system_call "canister_version" env
+
+  let print_ptr_len env = G.i (Call (nr (E.built_in env "print_ptr")))
+
+  let print_text env =
+    Func.share_code1 Func.Never env "print_text" ("str", I64Type) [] (fun env get_str ->
+      let (set_blob, get_blob) = new_local env "blob" in
+      get_str ^^ Text.to_blob env ^^ set_blob ^^
+      get_blob ^^ Blob.payload_ptr_unskewed env ^^
+      get_blob ^^ Blob.len env ^^
+      print_ptr_len env
+    )
+
+  (* For debugging *)
+  let _compile_static_print env s =
+    Blob.lit_ptr_len env Tagged.T s ^^ print_ptr_len env
+
+  let ic_trap env =
+    Func.share_code2 Func.Always env "ic_trap" (("ptr", I64Type), ("len", I64Type)) [] (fun env get_ptr get_length ->
+      get_ptr ^^
+      get_length ^^
+      system_call env "trap"
+    )
+
+  let trap_ptr_len env =
+    match E.mode env with
+    | Flags.WasmMode -> G.i Unreachable
+    | Flags.WASIMode -> print_ptr_len env ^^ G.i Unreachable
+    | Flags.ICMode | Flags.RefMode -> ic_trap env ^^ G.i Unreachable
+
+  let trap_with env s =
+    Blob.lit_ptr_len env Tagged.T s ^^ trap_ptr_len env
+
+  let trap_text env  =
+    Text.to_blob env ^^ Blob.as_ptr_len env ^^ trap_ptr_len env
+
+  let default_exports env =
+    (* these exports seem to be wanted by the hypervisor/v8 *)
+    E.add_export env (nr {
+      name = Lib.Utf8.decode (
+        match E.mode env with
+        | Flags.WASIMode -> "memory"
+        | _  -> "mem"
+      );
+      edesc = nr (MemoryExport (nr 0l))
+    });
+    E.add_export env (nr {
+      name = Lib.Utf8.decode "table";
+      edesc = nr (TableExport (nr 0l))
+    })
+
+  let export_init env =
+    assert (E.mode env = Flags.ICMode || E.mode env = Flags.RefMode);
+    let empty_f = Func.of_body env [] [] (fun env ->
+      Lifecycle.trans env Lifecycle.InInit ^^
+      G.i (Call (nr (E.built_in env "init")))
+      (* Stay in `InInit` state for asynchronous destabilization after upgrade. *)
+      (* Garbage collection is not yet activated. *)
+
+    ) in
+    let fi = E.add_fun env "canister_init" empty_f in
+    E.add_export env (nr {
+      name = Lib.Utf8.decode "canister_init";
+      edesc = nr (FuncExport (nr fi))
+      })
+
+  let export_heartbeat env =
+    assert (E.mode env = Flags.ICMode || E.mode env = Flags.RefMode);
+    let fi = E.add_fun env "canister_heartbeat"
+      (Func.of_body env [] [] (fun env ->
+        G.i (Call (nr (E.built_in env "heartbeat_exp"))) ^^
+        GC.collect_garbage env))
+    in
+    E.add_export env (nr {
+      name = Lib.Utf8.decode "canister_heartbeat";
+      edesc = nr (FuncExport (nr fi))
+    })
+
+  let export_timer env =
+    assert !Flags.global_timer;
+    assert (E.mode env = Flags.ICMode || E.mode env = Flags.RefMode);
+    let fi = E.add_fun env "canister_global_timer"
+      (Func.of_body env [] [] (fun env ->
+        G.i (Call (nr (E.built_in env "timer_exp"))) ^^
+        GC.collect_garbage env))
+    in
+    E.add_export env (nr {
+      name = Lib.Utf8.decode "canister_global_timer";
+      edesc = nr (FuncExport (nr fi))
+    })
+
+  let export_inspect env =
+    assert (E.mode env = Flags.ICMode || E.mode env = Flags.RefMode);
+    let fi = E.add_fun env "canister_inspect_message"
+      (Func.of_body env [] [] (fun env ->
+        G.i (Call (nr (E.built_in env "inspect_exp"))) ^^
+        system_call env "accept_message" (* assumes inspect_exp traps to reject *)
+        (* no need to GC !*)))
+    in
+    E.add_export env (nr {
+      name = Lib.Utf8.decode "canister_inspect_message";
+      edesc = nr (FuncExport (nr fi))
+    })
+
+  let initialize_main_actor_function_name = "@initialize_main_actor"
+
+  let initialize_main_actor env =
+    G.i (Call (nr (E.built_in env initialize_main_actor_function_name))) ^^
+    get_run_post_upgrade env ^^
+    (E.if0 
+      begin
+        Lifecycle.trans env Lifecycle.InPostUpgrade ^^
+        G.i (Call (nr (E.built_in env "post_exp"))) 
+      end
+      G.nop)
+
+  let get_actor_to_persist_function_name = "@get_actor_to_persist"
+
+  let get_actor_to_persist env =
+    G.i (Call (nr (E.built_in env get_actor_to_persist_function_name)))
+
+  (* Export the WASI `_start` entry point: transitions lifecycle into InInit,
+     runs the program's "init" function, optionally exercises the GC under
+     --sanity, and settles the lifecycle at Idle.
+     NOTE(review): the inner lambda binds `env1` but only the outer `env` is
+     used — `env1` appears intentionally unused; confirm. *)
+  let export_wasi_start env =
+    assert (E.mode env = Flags.WASIMode);
+    let fi = E.add_fun env "_start" (Func.of_body env [] [] (fun env1 ->
+      Lifecycle.trans env Lifecycle.InInit ^^
+      G.i (Call (nr (E.built_in env "init"))) ^^
+      (if !Flags.sanity then
+        (* also test the GC in WASI mode if sanity checks are enabled *)
+        GC.collect_garbage env
+      else
+        G.nop) ^^
+      Lifecycle.trans env Lifecycle.Idle
+    )) in
+    E.add_export env (nr {
+      name = Lib.Utf8.decode "_start";
+      edesc = nr (FuncExport (nr fi))
+      })
+
+  (* Export the IC upgrade hooks, `canister_pre_upgrade` and
+     `canister_post_upgrade` (no-op outside IC/Ref modes).
+     pre_upgrade: requires the canister to be stopped (status code 3) unless
+     there are no outstanding continuation-table callbacks, then runs the
+     compiled pre-upgrade expression ("pre_exp").
+     post_upgrade: records that the post-upgrade hook must run later (see
+     initialize_main_actor) and re-runs "init". *)
+  let export_upgrade_methods env =
+    if E.mode env = Flags.ICMode || E.mode env = Flags.RefMode then
+    (* ic0.canister_status value for "stopped" *)
+    let status_stopped = 3l in
+    let pre_upgrade_fi = E.add_fun env "pre_upgrade" (Func.of_body env [] [] (fun env ->
+      Lifecycle.trans env Lifecycle.InPreUpgrade ^^
+      (* check status is stopped or trap on outstanding callbacks *)
+      system_call env "canister_status" ^^ compile_eq32_const status_stopped ^^
+      E.if0
+       (G.nop)
+       (ContinuationTable.count env ^^
+          E.then_trap_with env "canister_pre_upgrade attempted with outstanding message callbacks (try stopping the canister before upgrade)") ^^
+      (* call pre_upgrade expression & any system method *)
+      (G.i (Call (nr (E.built_in env "pre_exp")))) ^^
+      Lifecycle.trans env Lifecycle.PostPreUpgrade
+    )) in
+
+    let post_upgrade_fi = E.add_fun env "post_upgrade" (Func.of_body env [] [] (fun env ->
+      compile_unboxed_one ^^ set_run_post_upgrade env ^^
+      Lifecycle.trans env Lifecycle.InInit ^^
+      G.i (Call (nr (E.built_in env "init")))
+      (* The post upgrade hook is called later after the completed destabilization, 
+         that may require additional explicit destabilization messages after upgrade. *)
+    )) in
+
+    E.add_export env (nr {
+      name = Lib.Utf8.decode "canister_pre_upgrade";
+      edesc = nr (FuncExport (nr pre_upgrade_fi))
+    });
+
+    E.add_export env (nr {
+      name = Lib.Utf8.decode "canister_post_upgrade";
+      edesc = nr (FuncExport (nr post_upgrade_fi))
+    })
+
+
+  (* Push a blob (actor tag Tagged.A) holding this canister's own id, obtained
+     via ic0.canister_self_size/copy; traps when not compiling for the IC. *)
+  let get_self_reference env =
+    match E.mode env with
+    | Flags.ICMode | Flags.RefMode ->
+      Func.share_code0 Func.Never env "canister_self" [I64Type] (fun env ->
+        Blob.of_size_copy env Tagged.A
+          (fun env -> 
+            system_call env "canister_self_size")
+          (fun env ->
+            system_call env "canister_self_copy")
+          (fun env -> compile_unboxed_const 0L)
+      )
+    | _ ->
+      E.trap_with env "cannot get self-actor-reference when running locally"
+
+  (* Push the IC system time via ic0.time; traps outside IC/Ref modes. *)
+  let get_system_time env =
+    match E.mode env with
+    | Flags.ICMode | Flags.RefMode ->
+      system_call env "time"
+    | _ ->
+      E.trap_with env "cannot get system time when running locally"
+
+  (* Push a blob (principal tag Tagged.P) with the current message caller,
+     via ic0.msg_caller_size/copy; traps outside IC/Ref modes. *)
+  let caller env =
+    match E.mode env with
+    | Flags.ICMode | Flags.RefMode ->
+      Blob.of_size_copy env Tagged.P
+        (fun env -> 
+          system_call env "msg_caller_size")
+        (fun env -> 
+          system_call env "msg_caller_copy")
+        (fun env -> compile_unboxed_const 0L)
+    | _ ->
+      E.trap_with env (Printf.sprintf "cannot get caller when running locally")
+
+  (* Push a blob (text tag Tagged.T) with the invoked method's name,
+     via ic0.msg_method_name_size/copy; traps outside IC/Ref modes. *)
+  let method_name env =
+    match E.mode env with
+    | Flags.ICMode | Flags.RefMode ->
+      Blob.of_size_copy env Tagged.T
+        (fun env -> 
+          system_call env "msg_method_name_size")
+        (fun env ->
+          system_call env "msg_method_name_copy")
+        (fun env -> compile_unboxed_const 0L)
+    | _ ->
+      E.trap_with env (Printf.sprintf "cannot get method_name when running locally")
+
+  (* Push a blob (tag Tagged.B) with the raw message argument data,
+     via ic0.msg_arg_data_size/copy; traps outside IC/Ref modes. *)
+  let arg_data env =
+    match E.mode env with
+    | Flags.ICMode | Flags.RefMode ->
+      Blob.of_size_copy env Tagged.B
+        (fun env -> 
+          system_call env "msg_arg_data_size")
+        (fun env -> 
+          system_call env "msg_arg_data_copy")
+        (fun env -> compile_unboxed_const 0L)
+    | _ ->
+      E.trap_with env (Printf.sprintf "cannot get arg_data when running locally")
+
+  (* Reject the current message: evaluate [arg_instrs] to a text value,
+     convert it to a blob, and pass its (ptr, len) to ic0.msg_reject.
+     Traps outside IC/Ref modes. *)
+  let reject env arg_instrs =
+    match E.mode env with
+    | Flags.ICMode | Flags.RefMode ->
+      arg_instrs ^^
+      Text.to_blob env ^^
+      Blob.as_ptr_len env ^^
+      system_call env "msg_reject"
+    | _ ->
+      E.trap_with env (Printf.sprintf "cannot reject when running locally")
+
+  (* Map ic0.msg_reject_code onto a Motoko variant value:
+     codes 1-5 become the named nullary tags below; any other code becomes
+     #future carrying the raw code as a tagged Nat32. The fold builds a
+     right-nested if/else chain ending in the #future default. *)
+  let error_code env =
+     Func.share_code0 Func.Always env "error_code" [I64Type] (fun env ->
+      let (set_code, get_code) = new_local env "code" in
+      system_call env "msg_reject_code" ^^ 
+      G.i (Convert (Wasm_exts.Values.I64 I64Op.ExtendUI32)) ^^
+      set_code ^^
+      List.fold_right (fun (tag, const) code ->
+        get_code ^^ compile_unboxed_const const ^^
+        compile_comparison I64Op.Eq ^^
+        E.if1 I64Type
+          (Variant.inject env tag (Tuple.compile_unit env))
+          code)
+        ["system_fatal", 1L;
+         "system_transient", 2L;
+         "destination_invalid", 3L;
+         "canister_reject", 4L;
+         "canister_error", 5L]
+        (Variant.inject env "future" (get_code ^^ BitTagged.tag env Type.Nat32)))
+
+  (* Push the reject message as a text blob (Tagged.T),
+     via ic0.msg_reject_msg_size/copy. *)
+  let error_message env =
+    Func.share_code0 Func.Never env "error_message" [I64Type] (fun env ->
+      Blob.of_size_copy env Tagged.T
+        (fun env -> system_call env "msg_reject_msg_size")
+        (fun env ->
+          system_call env "msg_reject_msg_copy")
+        (fun env -> compile_unboxed_const 0L)
+    )
+
+  (* Push the pair (error_code, error_message) as a 2-tuple heap value. *)
+  let error_value env =
+    Func.share_code0 Func.Never env "error_value" [I64Type] (fun env ->
+      error_code env ^^
+      error_message env ^^
+      Tuple.from_stack env 2
+    )
+
+  (* Reply to the current message with the raw bytes at (start, size):
+     appends them via ic0.msg_reply_data_append, then commits with
+     ic0.msg_reply. *)
+  let reply_with_data env =
+    Func.share_code2 Func.Never env "reply_with_data" (("start", I64Type), ("size", I64Type)) [] (
+      fun env get_data_start get_data_size ->
+        get_data_start ^^
+        get_data_size ^^
+        system_call env "msg_reply_data_append" ^^
+        system_call env "msg_reply"
+   )
+  
+  (* Reply with the fixed Candid encoding of the empty tuple ("DIDL\x00\x00"). *)
+  let static_nullary_reply env =
+    Blob.lit_ptr_len env Tagged.B "DIDL\x00\x00" ^^
+    reply_with_data env
+
+  (* Actor reference on the stack *)
+  (* Build a public-function reference: given an actor blob on the stack and a
+     field [name], pair them as a 2-element array (tag Tagged.S). The sanity
+     check asserts the stack value really is an actor blob (Blob A). *)
+  let actor_public_field env name =
+    (* simply tuple canister name and function name *)
+    Tagged.(sanity_check_tag __LINE__ env (Blob A)) ^^
+    Blob.lit env Tagged.T name ^^
+    Func.share_code2 Func.Never env "actor_public_field" (("actor", I64Type), ("func", I64Type)) [] (
+      fun env get_actor get_func ->
+      Arr.lit env Tagged.S [get_actor; get_func]
+   )
+
+
+  (* Trap with an "assertion failed at <region>" message; the source region's
+     file paths are reduced to basenames to keep the message short and
+     machine-independent. *)
+  let fail_assert env at =
+    let open Source in
+    let at = {
+        left = {at.left with file = Filename.basename at.left.file};
+        right = {at.right with file = Filename.basename at.right.file}
+      }
+    in
+    E.trap_with env (Printf.sprintf "assertion failed at %s" (string_of_region at))
+
+  (* Well-known public method names taken from the type system's helper fields. *)
+  let async_method_name = Type.(motoko_async_helper_fld.lab)
+  let gc_trigger_method_name = Type.(motoko_gc_trigger_fld.lab)
+ 
+  (* Push 1 iff the caller of the current message is this canister itself:
+     first compare id lengths, then (only if equal) copy both ids into
+     stack-allocated scratch buffers and memcmp them. *)
+  let is_self_call env =
+    let (set_len_self, get_len_self) = new_local env "len_self" in
+    let (set_len_caller, get_len_caller) = new_local env "len_caller" in
+    system_call env "canister_self_size" ^^ set_len_self ^^
+    system_call env "msg_caller_size" ^^ set_len_caller ^^
+    get_len_self ^^ get_len_caller ^^ compile_comparison I64Op.Eq ^^
+    E.if1 I64Type
+      begin
+        get_len_self ^^ Stack.dynamic_with_bytes env "str_self" (fun get_str_self ->
+          get_len_caller ^^ Stack.dynamic_with_bytes env "str_caller" (fun get_str_caller ->
+            get_str_caller ^^ compile_unboxed_const 0L ^^ get_len_caller ^^
+            system_call env "msg_caller_copy" ^^
+            get_str_self ^^ compile_unboxed_const 0L ^^ get_len_self ^^
+            system_call env "canister_self_copy" ^^
+            get_str_self ^^ get_str_caller ^^ get_len_self ^^ Heap.memcmp env ^^
+            compile_eq_const 0L))
+      end
+      begin
+        compile_unboxed_const 0L
+      end
+
+  (* Trap unless the current message is a self-call. *)
+  let assert_caller_self env =
+    is_self_call env ^^
+    E.else_trap_with env "not a self-call"
+
+  (* Push 1 iff the caller of the current message is a controller of this
+     canister: copy the caller id into scratch stack space and test it with
+     the `is_controller` system facility. *)
+  let is_controller_call env =
+    let (set_len_caller, get_len_caller) = new_local env "len_caller" in
+    system_call env "msg_caller_size" ^^ set_len_caller ^^
+    get_len_caller ^^ Stack.dynamic_with_bytes env "str_caller" (fun get_str_caller ->
+      get_str_caller ^^ compile_unboxed_const 0L ^^ get_len_caller ^^
+      system_call env "msg_caller_copy" ^^
+      get_str_caller ^^ get_len_caller ^^ is_controller env)
+
+  (* Trap unless the caller is either this canister itself or a controller. *)
+  let assert_caller_self_or_controller env =
+    is_self_call env ^^
+    is_controller_call env ^^
+    G.i (Binary (Wasm_exts.Values.I64 I64Op.Or)) ^^
+    E.else_trap_with env "not a self-call or call from controller"
+
+  (* Cycles *)
+  (* Thin wrappers around the 128-bit ic0 cycle system calls; each expects the
+     caller to have pushed the arguments the underlying call requires (e.g. a
+     destination pointer for the 128-bit result). All trap when not compiling
+     for the IC. *)
+
+  (* Write the canister's cycle balance to the pointed-to 16-byte buffer. *)
+  let cycle_balance env =
+    match E.mode env with
+    | Flags.ICMode
+    | Flags.RefMode ->
+      system_call env "canister_cycle_balance128"
+    | _ ->
+      E.trap_with env "cannot read balance when running locally"
+
+  (* Attach cycles (two 64-bit halves on the stack) to the pending call. *)
+  let cycles_add env =
+    match E.mode env with
+    | Flags.ICMode
+    | Flags.RefMode ->
+      system_call env "call_cycles_add128"
+    | _ ->
+      E.trap_with env "cannot accept cycles when running locally"
+
+  (* Accept up to the given amount of cycles from the current message. *)
+  let cycles_accept env =
+    match E.mode env with
+    | Flags.ICMode
+    | Flags.RefMode ->
+      system_call env "msg_cycles_accept128"
+    | _ ->
+      E.trap_with env "cannot accept cycles when running locally"
+
+  (* Write the cycles available in the current message to the given buffer. *)
+  let cycles_available env =
+    match E.mode env with
+    | Flags.ICMode
+    | Flags.RefMode ->
+      system_call env "msg_cycles_available128"
+    | _ ->
+      E.trap_with env "cannot get cycles available when running locally"
+
+  (* Write the cycles refunded by the callee to the given buffer. *)
+  let cycles_refunded env =
+    match E.mode env with
+    | Flags.ICMode
+    | Flags.RefMode ->
+      system_call env "msg_cycles_refunded128"
+    | _ ->
+      E.trap_with env "cannot get cycles refunded when running locally"
+
+  (* Set the canister's certified data from the blob on the stack
+     (ic0.certified_data_set); traps outside IC/Ref modes. *)
+  let set_certified_data env =
+    match E.mode env with
+    | Flags.ICMode
+    | Flags.RefMode ->
+      Blob.as_ptr_len env ^^
+      system_call env "certified_data_set"
+    | _ ->
+      E.trap_with env "cannot set certified data when running locally"
+
+  (* Push `?blob` with the data certificate when one is present for the
+     current call context, else `null`; traps outside IC/Ref modes. *)
+  let get_certificate env =
+    match E.mode env with
+    | Flags.ICMode
+    | Flags.RefMode ->
+      system_call env "data_certificate_present" ^^
+      Bool.from_rts_int32 ^^
+      E.if1 I64Type
+      begin
+        Opt.inject_simple env (
+          Blob.of_size_copy env Tagged.B
+            (fun env -> 
+              system_call env "data_certificate_size")
+            (fun env -> 
+              system_call env "data_certificate_copy")
+            (fun env -> compile_unboxed_const 0L)
+        )
+      end (Opt.null_lit env)
+    | _ ->
+      E.trap_with env "cannot get certificate when running locally"
+
+end (* IC *)
+
+(* Conversions between Motoko bignums and the IC's 128-bit cycle amounts,
+   plus the user-facing cycle prims built on IC.cycles_*. 128-bit results are
+   exchanged through a 16-byte little-endian scratch buffer (low 64 bits at
+   offset 0, high 64 bits at offset 8). *)
+module Cycles = struct
+
+  (* Read the 128-bit little-endian value at [ptr] and push it as a bignum:
+     low word + (high word << 64), skipping the shift/add when high = 0. *)
+  let from_word128_ptr env = Func.share_code1 Func.Never env "from_word128_ptr" ("ptr", I64Type) [I64Type]
+    (fun env get_ptr ->
+     let set_lower, get_lower = new_local env "lower" in
+     get_ptr ^^
+     G.i (Load {ty = I64Type; align = 0; offset = 0L; sz = None }) ^^
+     BigNum.from_word64 env ^^
+     set_lower ^^
+     get_ptr ^^
+     G.i (Load {ty = I64Type; align = 0; offset = 8L; sz = None }) ^^
+     compile_test I64Op.Eqz ^^
+     E.if1 I64Type
+       get_lower
+       begin
+         get_lower ^^
+         get_ptr ^^
+         G.i (Load {ty = I64Type; align = 0; offset = 8L; sz = None }) ^^
+         BigNum.from_word64 env ^^
+         (* shift left 64 bits *)
+         compile_unboxed_const 64L ^^
+         TaggedSmallWord.msb_adjust Type.Nat32 ^^
+         BigNum.compile_lsh env ^^
+         BigNum.compile_add env
+       end)
+
+  (* takes a bignum from the stack, traps if ≥2^128, and leaves two 64bit words on the stack *)
+  (* only used twice, so ok to not use share_code1; that would require I64Type support in FakeMultiVal *)
+  (* Result order: high word first, then low word (matches cycles_add128's
+     (high, low) argument order). *)
+  let to_two_word64 env =
+    let (set_val, get_val) = new_local env "cycles" in
+    set_val ^^
+    get_val ^^
+    Tagged.materialize_shared_value env (BigNum.constant env (Big_int.power_int_positive_int 2 128)) ^^
+    BigNum.compile_relop env Lt ^^
+    E.else_trap_with env "cycles out of bounds" ^^
+
+    get_val ^^
+    (* shift right 64 bits *)
+    compile_unboxed_const 64L ^^
+    TaggedSmallWord.msb_adjust Type.Nat32 ^^
+    BigNum.compile_rsh env ^^
+    BigNum.truncate_to_word64 env ^^
+
+    get_val ^^
+    BigNum.truncate_to_word64 env
+
+  (* Push the canister's cycle balance as a bignum. *)
+  let balance env =
+    Func.share_code0 Func.Always env "cycle_balance" [I64Type] (fun env ->
+      Stack.with_words env "dst" 4L (fun get_dst ->
+        get_dst ^^
+        IC.cycle_balance env ^^
+        get_dst ^^
+        from_word128_ptr env
+      )
+    )
+
+  (* Attach the bignum amount on the stack to the pending call. *)
+  let add env =
+    Func.share_code1 Func.Always env "cycle_add" ("cycles", I64Type) [] (fun env get_x ->
+      get_x ^^
+      to_two_word64 env ^^
+      IC.cycles_add env
+    )
+
+  (* Accept up to the given bignum amount; push the amount actually accepted. *)
+  let accept env =
+    Func.share_code1 Func.Always env "cycle_accept" ("cycles", I64Type) [I64Type] (fun env get_x ->
+      Stack.with_words env "dst" 4L (fun get_dst ->
+        get_x ^^
+        to_two_word64 env ^^
+        get_dst ^^
+        IC.cycles_accept env ^^
+        get_dst ^^
+        from_word128_ptr env
+      )
+    )
+
+  (* Push the cycles available in the current message as a bignum. *)
+  let available env =
+    Func.share_code0 Func.Always env "cycle_available" [I64Type] (fun env ->
+      Stack.with_words env "dst" 4L (fun get_dst ->
+        get_dst ^^
+        IC.cycles_available env ^^
+        get_dst ^^
+        from_word128_ptr env
+      )
+    )
+
+  (* Push the cycles refunded by the last call as a bignum. *)
+  let refunded env =
+    Func.share_code0 Func.Always env "cycle_refunded" [I64Type] (fun env ->
+      Stack.with_words env "dst" 4L (fun get_dst ->
+        get_dst ^^
+        IC.cycles_refunded env ^^
+        get_dst ^^
+        from_word128_ptr env
+      )
+    )
+
+end (* Cycles *)
+
+(* Low-level, almost raw access to IC stable memory.
+   Essentially a virtual page allocator
+   * enforcing limit --max-stable-pages not exceeded
+   * tracking virtual page count, ignoring physical pages added for stable variable serialization (global `__stablemem_size`)
+   * recording current format of contents (global `__stablemem_version`)
+   Used to implement stable variable serialization, (experimental) stable memory library and Region type (see region.rs)
+*)
+module StableMem = struct
+
+
+  (* Raw stable memory API,
+     using ic0.stable64_xxx or
+     emulating via (for now) 64-bit memory 1
+  *)
+  (* Grow physical stable memory by the page count on the stack; pushes the
+     previous page count, or -1 on failure (mirrors ic0.stable64_grow). *)
+  let stable64_grow env =
+    E.require_stable_memory env;
+    match E.mode env with
+    | Flags.ICMode | Flags.RefMode ->
+       IC.system_call env "stable64_grow"
+    | _ ->
+       Func.share_code1 Func.Always env "stable64_grow" ("pages", I64Type) [I64Type]
+         (fun env get_pages ->
+          let set_old_pages, get_old_pages = new_local env "old_pages" in
+          get_pages ^^
+          G.i StableGrow ^^
+          set_old_pages ^^
+          get_old_pages ^^
+          compile_unboxed_const (-1L) ^^
+          compile_comparison I64Op.Eq ^^
+          E.if1 I64Type
+            begin
+             compile_unboxed_const (-1L)
+            end
+            begin
+              get_old_pages
+            end)
+
+  (* Push the current physical stable memory size in pages. *)
+  let stable64_size env =
+    E.require_stable_memory env;
+    match E.mode env with
+    | Flags.ICMode | Flags.RefMode ->
+       IC.system_call env "stable64_size"
+    | _ ->
+       Func.share_code0 Func.Always env "stable64_size" [I64Type]
+         (fun env ->
+          G.i StableSize)
+
+  (* Copy [size] bytes from stable memory [offset] to heap address [dst]. *)
+  let stable64_read env =
+    E.require_stable_memory env;
+    match E.mode env with
+    | Flags.ICMode | Flags.RefMode ->
+       IC.system_call env "stable64_read"
+    | _ ->
+       Func.share_code3 Func.Always env "stable64_read"
+         (("dst", I64Type), ("offset", I64Type), ("size", I64Type)) []
+         (fun env get_dst get_offset get_size ->
+          get_dst ^^
+          get_offset ^^
+          get_size ^^
+          G.i StableRead)
+
+  (* Copy [size] bytes from heap address [src] to stable memory [offset]. *)
+  let stable64_write env =
+    E.require_stable_memory env;
+    match E.mode env with
+    | Flags.ICMode | Flags.RefMode ->
+       IC.system_call env "stable64_write"
+    | _ ->
+       Func.share_code3 Func.Always env "stable64_write"
+         (("offset", I64Type), ("src", I64Type), ("size", I64Type)) []
+         (fun env get_offset get_src get_size ->
+          get_offset ^^
+          get_src ^^
+          get_size ^^
+          G.i StableWrite)
+
+
+  (* Versioning (c.f. Region.rs) *)
+  (* NB: these constants must agree with the constants in Region.rs *)
+  let legacy_version_no_stable_memory = Int64.of_int 0 (* never manifest in serialized form *)
+  let legacy_version_some_stable_memory = Int64.of_int 1
+  let legacy_version_regions = Int64.of_int 2
+  let version_graph_copy_no_regions = Int64.of_int 3
+  let version_graph_copy_regions = Int64.of_int 4
+  let version_stable_heap_no_regions = Int64.of_int 5
+  let version_stable_heap_regions = Int64.of_int 6
+  let version_max = version_stable_heap_regions
+
+  (* Register the two mutable globals tracking the logical (virtual) page
+     count and the serialized-format version. *)
+  let register_globals env =
+    (* size (in pages) *)
+    E.add_global64 env "__stablemem_size" Mutable 0L;
+    E.add_global64 env "__stablemem_version" Mutable version_stable_heap_no_regions
+
+  (* Accessors for the logical page count global. *)
+  let get_mem_size env =
+    G.i (GlobalGet (nr (E.get_global env "__stablemem_size")))
+
+  let set_mem_size env =
+    G.i (GlobalSet (nr (E.get_global env "__stablemem_size")))
+
+  (* Accessors for the format version global. *)
+  let get_version env =
+    G.i (GlobalGet (nr (E.get_global env "__stablemem_version")))
+
+  let set_version env =
+    G.i (GlobalSet (nr (E.get_global env "__stablemem_version")))
+
+  (* Initialize the RTS region system; the flag selects forced region use. *)
+  let region_init env =
+    compile_unboxed_const (if !Flags.use_stable_regions then 1L else 0L) ^^
+    E.call_import env "rts" "region_init"
+
+  (* stable memory bounds check *)
+  (* Trap if the offset on the stack is >= mem_size * page_size (in bytes). *)
+  let guard env =
+       get_mem_size env ^^
+       compile_unboxed_const (Int64.of_int page_size_bits) ^^
+       G.i (Binary (Wasm_exts.Values.I64 I64Op.Shl)) ^^
+       compile_comparison I64Op.GeU ^^
+       E.then_trap_with env "StableMemory offset out of bounds"
+
+  (* check both offset and [offset,.., offset + size) within bounds *)
+  (* c.f. region.rs check_relative_range *)
+  (* TODO: specialize on size *)
+  let guard_range env =
+      Func.share_code2 Func.Always env "__stablemem_guard_range"
+        (("offset", I64Type), ("size", I64Type)) []
+        (fun env get_offset get_size ->
+          get_size ^^
+          compile_unboxed_one ^^
+          compile_comparison I64Op.LeU ^^
+          E.if0 begin
+            (* size <= 1: a single-offset check suffices *)
+            get_offset ^^
+            guard env
+          end
+          begin
+            (* trap if offset + size would wrap around 2^64 *)
+            compile_unboxed_const (Int64.minus_one) ^^
+            get_size ^^
+            G.i (Binary (Wasm_exts.Values.I64 I64Op.Sub)) ^^
+            get_offset ^^
+            compile_comparison I64Op.LtU ^^
+            E.then_trap_with env "StableMemory range overflow" ^^
+            (* trap if offset + size exceeds the logical size in bytes *)
+            get_offset ^^
+            get_size ^^
+            G.i (Binary (Wasm_exts.Values.I64 I64Op.Add)) ^^
+            get_mem_size env ^^
+            compile_unboxed_const (Int64.of_int page_size_bits) ^^
+            G.i (Binary (Wasm_exts.Values.I64 I64Op.Shl)) ^^
+            compile_comparison I64Op.GtU ^^
+            E.then_trap_with env "StableMemory range out of bounds"
+          end)
+
+  (* Emit a bounds check for an access of [bytes] at the offset produced by
+     [get_offset], but only when [guarded] is set; no-op otherwise. *)
+  let add_guard env guarded get_offset bytes =
+    if guarded then
+     (get_offset ^^
+      if bytes = 1L then
+        guard env
+      else
+        compile_unboxed_const bytes ^^
+        guard_range env)
+    else G.nop
+
+  (* TODO: crusso in read/write could avoid stack allocation by reserving and re-using scratch memory instead *)
+  (* Generic guarded/unguarded read: copy [bytes] from stable memory into a
+     stack scratch buffer, then apply [load] to produce the value.
+     NOTE(review): `words` is computed as (bytes+3)/4, i.e. 32-bit words —
+     confirm Stack.with_words' word size; if words are wider this merely
+     over-allocates. *)
+  let read env guarded name typ bytes load =
+      Func.share_code1 Func.Never env (Printf.sprintf "__stablemem_%sread_%s" (if guarded then "guarded_" else "") name)
+        ("offset", I64Type) [typ]
+        (fun env get_offset ->
+          let words = Int64.div (Int64.add bytes 3L) 4L in
+          add_guard env guarded get_offset bytes ^^
+          Stack.with_words env "temp_ptr" words (fun get_temp_ptr ->
+            get_temp_ptr ^^
+            get_offset ^^
+            compile_unboxed_const bytes ^^
+            stable64_read env ^^
+            get_temp_ptr ^^ load))
+
+  (* Generic guarded/unguarded write: [store] the value into a stack scratch
+     buffer, then copy [bytes] of it out to stable memory. Only the low-order
+     [bytes] of the scratch buffer are written back (little-endian). *)
+  let write env guarded name typ bytes store =
+      Func.share_code2 Func.Never env (Printf.sprintf "__stablemem_%swrite_%s" (if guarded then "guarded_" else "") name)
+        (("offset", I64Type), ("value", typ)) []
+        (fun env get_offset get_value ->
+          let words = Int64.div (Int64.add bytes 3L) 4L in
+          add_guard env guarded get_offset bytes ^^
+          Stack.with_words env "temp_ptr" words (fun get_temp_ptr ->
+            get_temp_ptr ^^ get_value ^^ store ^^
+            get_offset ^^
+            get_temp_ptr ^^
+            compile_unboxed_const bytes ^^
+            stable64_write env))
+
+  (* Raw 32-bit scratch-buffer load/store instructions (shadowed below by the
+     guarded word32 accessor functions). *)
+  let load_word32 = G.i (Load {ty = I32Type; align = 0; offset = 0L; sz = None})
+  let store_word32 : G.t = G.i (Store {ty = I32Type; align = 0; offset = 0L; sz = None})  
+
+  (* Unguarded accessors, used internally (e.g. for serialization metadata). *)
+  let write_word32 env =
+    write env false "word32" I32Type 4L store_word32
+
+  let write_word64 env =
+    write env false "word64" I64Type 8L store_unskewed_ptr
+
+  let read_word32 env =
+    read env false "word32" I32Type 4L load_word32
+  
+  let read_word64 env =
+    read env false "word64" I64Type 8L load_unskewed_ptr
+  
+  (* ensure_pages : ensure at least num pages allocated,
+     growing (real) stable memory if needed *)
+  let ensure_pages env =
+      Func.share_code1 Func.Always env "__stablemem_ensure_pages"
+        ("pages", I64Type) [I64Type]
+        (fun env get_pages ->
+          let (set_size, get_size) = new_local env "size" in
+          let (set_pages_needed, get_pages_needed) = new_local env "pages_needed" in
+
+          stable64_size env ^^
+          set_size ^^
+
+          get_pages ^^
+          get_size ^^
+          G.i (Binary (Wasm_exts.Values.I64 I64Op.Sub)) ^^
+          set_pages_needed ^^
+
+          get_pages_needed ^^
+          compile_unboxed_const 0L ^^
+          compile_comparison I64Op.GtS ^^
+          E.if1 I64Type
+            (get_pages_needed ^^
+             stable64_grow env)
+            get_size)
+
+      (* ensure stable memory includes [offset..offset+size), assumes size > 0 *)
+  let ensure env =
+      Func.share_code2 Func.Always env "__stablemem_ensure"
+        (("offset", I64Type), ("size", I64Type)) []
+        (fun env get_offset get_size ->
+          let (set_sum, get_sum) = new_local env "sum" in
+          get_offset ^^
+          get_size ^^
+          G.i (Binary (Wasm_exts.Values.I64 I64Op.Add)) ^^
+          set_sum ^^
+          (* check for overflow *)
+          get_sum ^^
+          get_offset ^^
+          compile_comparison I64Op.LtU ^^
+          E.then_trap_with env "Range overflow" ^^
+          (* ensure page *)
+          get_sum ^^
+          compile_unboxed_const (Int64.of_int page_size_bits) ^^
+          G.i (Binary (Wasm_exts.Values.I64 I64Op.ShrU)) ^^
+          compile_add_const 1L ^^
+          ensure_pages env ^^
+          (* Check result *)
+          compile_unboxed_const 0L ^^
+          compile_comparison I64Op.LtS ^^
+          E.then_trap_with env "Out of stable memory.")
+
+  (* low-level grow, respecting --max-stable-pages *)
+  (* Pushes the previous logical size in pages, or -1 when the limit is
+     exceeded or the physical grow fails (logical size is then unchanged). *)
+  let grow env =
+      Func.share_code1 Func.Always env "__stablemem_grow"
+        ("pages", I64Type) [I64Type] (fun env get_pages ->
+          let (set_size, get_size) = new_local env "size" in
+          get_mem_size env ^^
+          set_size ^^
+
+          (* check within --max-stable-pages *)
+          get_size ^^
+          get_pages ^^
+          G.i (Binary (Wasm_exts.Values.I64 I64Op.Add)) ^^
+          compile_unboxed_const (Int64.of_int (!Flags.max_stable_pages)) ^^
+          compile_comparison I64Op.GtU ^^
+          E.if1 I64Type
+            begin
+             compile_unboxed_const (-1L) ^^
+             G.i Return
+            end
+            begin
+              let (set_new_size, get_new_size) = new_local env "new_size" in
+              get_size ^^
+              get_pages ^^
+              G.i (Binary (Wasm_exts.Values.I64 I64Op.Add)) ^^
+              set_new_size ^^
+
+              (* physical grow if necessary *)
+              let (set_ensured, get_ensured) = new_local env "ensured" in
+              get_new_size ^^
+              ensure_pages env ^^
+              set_ensured ^^
+
+              (* Check result *)
+              get_ensured ^^
+              compile_unboxed_const 0L ^^
+              compile_comparison I64Op.LtS ^^
+              E.if1 I64Type
+                ((* propagate failure -1; preserve logical size *)
+                 get_ensured)
+                ((* update logical size *)
+                 get_new_size ^^
+                 set_mem_size env ^^
+                 (* return old logical size *)
+                 get_size)
+            end)
+
+  (* Guarded (bounds-checked) typed accessors used by the public prims.
+     Stores with sz = None write the full scratch word; only the low-order
+     bytes are copied to stable memory by `write`. *)
+  let load_word32 env =
+    read env true "word32" I32Type 4L
+      (G.i (Load {ty = I32Type; align = 0; offset = 0L; sz = None }))
+  let store_word32 env =
+    write env true "word32" I32Type 4L
+      (G.i (Store {ty = I32Type; align = 0; offset = 0L; sz = None}))
+
+  let load_word8 env =
+    read env true "word8" I32Type 1L
+      (G.i (Load {ty = I32Type; align = 0; offset = 0L; sz = Some Wasm_exts.Types.(Pack8, ZX)}))
+  let store_word8 env =
+    write env true "word8" I32Type 1L
+      (G.i (Store {ty = I32Type; align = 0; offset = 0L; sz = None}))
+
+  let load_word16 env =
+    read env true "word16" I32Type 2L
+      (G.i (Load {ty = I32Type; align = 0; offset = 0L; sz = Some Wasm_exts.Types.(Pack16, ZX)}))
+  let store_word16 env =
+    write env true "word16" I32Type 2L
+      (G.i (Store {ty = I32Type; align = 0; offset = 0L; sz = None}))
+
+  let load_word64 env =
+    read env true "word64" I64Type 8L load_unskewed_ptr
+  let store_word64 env =
+    write env true "word64" I64Type 8L store_unskewed_ptr
+
+  let load_float64 env =
+    read env true "float64" F64Type 8L
+      (G.i (Load {ty = F64Type; align = 0; offset = 0L; sz = None }))
+  let store_float64 env =
+    write env true "float64" F64Type 8L
+      (G.i (Store {ty = F64Type; align = 0; offset = 0L; sz = None}))
+
+  (* Read [len] bytes at [offset] into a freshly allocated blob (guarded). *)
+  let load_blob env =
+      Func.share_code2 Func.Always env "__stablemem_load_blob"
+        (("offset", I64Type), ("len", I64Type)) [I64Type]
+        (fun env get_offset get_len ->
+          let (set_blob, get_blob) = new_local env "blob" in
+          get_offset ^^
+          get_len ^^
+          guard_range env ^^
+          Blob.alloc env Tagged.B get_len ^^ set_blob ^^
+          get_blob ^^ Blob.payload_ptr_unskewed env ^^
+          get_offset ^^
+          get_len ^^
+          stable64_read env ^^
+          get_blob)
+
+  (* Write the given blob's payload to stable memory at [offset] (guarded). *)
+  let store_blob env =
+      Func.share_code2 Func.Always env "__stablemem_store_blob"
+        (("offset", I64Type), ("blob", I64Type)) []
+        (fun env get_offset get_blob ->
+         let (set_len, get_len) = new_local env "len" in
+          get_blob ^^ Blob.len env ^^ set_len ^^
+          get_offset ^^
+          get_len ^^
+          guard_range env ^^
+          get_offset ^^
+          get_blob ^^ Blob.payload_ptr_unskewed env ^^
+          get_len ^^
+          stable64_write env)
+
+end (* StableMem *)
+
+(* StableMemoryInterface *)
+(* Core, legacy interface to IC stable memory, used to implement prims `stableMemoryXXX` of
+   library `ExperimentalStableMemory.mo`.
+   Each operation dispatches on the state of `StableMem.get_version()`.
+   * StableMem.version_stable_heap_no_regions
+     * use StableMem directly.
+   * StableMem.version_stable_heap_regions: use Region.mo
+*)
+module StableMemoryInterface = struct
+
+  (* Helpers *)
+
+  (* Push region 0, the RTS region that backs legacy stable memory when the
+     region system is active. *)
+  let get_region0 env = E.call_import env "rts" "region0_get"
+
+  (* Version dispatch: when the format version is version_stable_heap_regions,
+     run [is1] against region 0 (region pushed first, then [args]); otherwise
+     run [is2] directly on [args]. [tys] is the common result stack type. *)
+  let if_regions env args tys is1 is2 =
+    StableMem.get_version env ^^
+    compile_unboxed_const StableMem.version_stable_heap_regions ^^
+    compile_comparison I64Op.Eq ^^
+    E.if_ env tys
+      (get_region0 env ^^ args ^^ is1 env)
+      (args ^^ is2 env)
+
+  (* Prims: each declares the stable-memory requirement up front and shares a
+     single code copy per module. *)
+
+  (* Logical size in pages. *)
+  let size env =
+    E.require_stable_memory env;
+    Func.share_code0 Func.Always env "__stablememory_size" [I64Type]
+      (fun env ->
+        if_regions env
+          G.nop
+          [I64Type]
+          Region.size
+          StableMem.get_mem_size)
+
+  (* Grow by [pages]; returns previous size or -1 on failure. *)
+  let grow env =
+    E.require_stable_memory env;
+    Func.share_code1 Func.Always env "__stablememory_grow" ("pages", I64Type) [I64Type]
+      (fun env get_pages ->
+        if_regions env
+          get_pages
+          [I64Type]
+          Region.grow
+          (fun env ->
+            (* logical grow *)
+            StableMem.grow env))
+
+  let load_blob env =
+    E.require_stable_memory env;
+    Func.share_code2 Func.Never env "__stablememory_load_blob"
+      (("offset", I64Type), ("len", I64Type)) [I64Type]
+      (fun env offset len ->
+        if_regions env
+          (offset ^^ len)
+          [I64Type]
+          Region.load_blob
+          StableMem.load_blob)
+  let store_blob env =
+    E.require_stable_memory env;
+    Func.share_code2 Func.Never env "__stablememory_store_blob"
+      (("offset", I64Type), ("blob", I64Type)) []
+      (fun env offset blob ->
+        if_regions env
+          (offset ^^ blob)
+          []
+          Region.store_blob
+          StableMem.store_blob)
+
+  let load_word8 env =
+    E.require_stable_memory env;
+    Func.share_code1 Func.Never env "__stablememory_load_word8"
+      ("offset", I64Type) [I32Type]
+      (fun env offset ->
+        if_regions env
+          offset
+          [I32Type]
+          Region.load_word8
+          StableMem.load_word8)
+  let store_word8 env =
+    E.require_stable_memory env;
+    Func.share_code2 Func.Never env "__stablememory_store_word8"
+      (("offset", I64Type), ("value", I32Type)) []
+      (fun env offset value ->
+        if_regions env
+          (offset ^^ value)
+          []
+          Region.store_word8
+          StableMem.store_word8)
+
+  let load_word16 env =
+    E.require_stable_memory env;
+    Func.share_code1 Func.Never env "__stablememory_load_word16"
+      ("offset", I64Type) [I32Type]
+      (fun env offset ->
+        if_regions env
+          offset
+          [I32Type]
+          Region.load_word16
+          StableMem.load_word16)
+  let store_word16 env =
+    E.require_stable_memory env;
+    Func.share_code2 Func.Never env "__stablememory_store_word16"
+      (("offset", I64Type), ("value", I32Type)) []
+      (fun env offset value ->
+        if_regions env
+          (offset ^^ value)
+          []
+          Region.store_word16
+          StableMem.store_word16)
+
+  let load_word32 env =
+    E.require_stable_memory env;
+    Func.share_code1 Func.Never env "__stablememory_load_word32"
+      ("offset", I64Type) [I32Type]
+      (fun env offset ->
+        if_regions env
+          offset
+          [I32Type]
+          Region.load_word32
+          StableMem.load_word32)
+  let store_word32 env =
+    E.require_stable_memory env;
+    Func.share_code2 Func.Never env "__stablememory_store_word32"
+      (("offset", I64Type), ("value", I32Type)) []
+      (fun env offset value ->
+        if_regions env
+          (offset ^^ value)
+          []
+          Region.store_word32
+          StableMem.store_word32)
+
+  let load_word64 env =
+    E.require_stable_memory env;
+    Func.share_code1 Func.Never env "__stablememory_load_word64" ("offset", I64Type) [I64Type]
+      (fun env offset ->
+        if_regions env
+          offset
+          [I64Type]
+          Region.load_word64
+          StableMem.load_word64)
+  let store_word64 env =
+    E.require_stable_memory env;
+    Func.share_code2 Func.Never env "__stablememory_store_word64"
+      (("offset", I64Type), ("value", I64Type)) []
+      (fun env offset value ->
+        if_regions env
+          (offset ^^ value)
+          []
+          Region.store_word64
+          StableMem.store_word64)
+
+  let load_float64 env =
+    E.require_stable_memory env;
+    Func.share_code1 Func.Never env "__stablememory_load_float64"
+      ("offset", I64Type) [F64Type]
+      (fun env offset ->
+        if_regions env
+          offset
+          [F64Type]
+          Region.load_float64
+          StableMem.load_float64)
+  let store_float64 env =
+    (* fix: declare the stable-memory requirement like every sibling prim
+       above; this was the only prim in the module missing it *)
+    E.require_stable_memory env;
+    Func.share_code2 Func.Never env "__stablememory_store_float64"
+      (("offset", I64Type), ("value", F64Type)) []
+      (fun env offset value ->
+        if_regions env
+          (offset ^^ value)
+          []
+          Region.store_float64
+          StableMem.store_float64)
+
+end
+
+module UpgradeStatistics = struct
+  (* Accessors for the RTS-maintained counter of instructions spent during
+     upgrades. The counter itself lives in the RTS; these emit import calls. *)
+  let get_upgrade_instructions env =
+    E.call_import env "rts" "get_upgrade_instructions"
+  let set_upgrade_instructions env =
+    E.call_import env "rts" "set_upgrade_instructions"
+
+  (* Adds the current instruction-counter reading to the stored total. *)
+  let add_instructions env =
+    get_upgrade_instructions env ^^
+    GC.instruction_counter env ^^
+    G.i (Binary (Wasm_exts.Values.I64 I64Op.Add)) ^^
+    set_upgrade_instructions env
+
+  (* Overwrites the stored total with the current instruction-counter reading. *)
+  let set_instructions env =
+    GC.instruction_counter env ^^
+    set_upgrade_instructions env
+end
+
+module RTS_Exports = struct
+  (* Exports Motoko-generated helpers for use by the RTS.
+     Must be called late, after main codegen, to ensure correct generation
+     of functioning or unused-but-trapping stable memory exports (as required).
+   *)
+  let system_exports env =
+
+    (* Value constructors *)
+
+    let int_from_i64_fi = E.add_fun env "int_from_i64" (
+      Func.of_body env ["v", I64Type] [I64Type] (fun env ->
+        let get_v = G.i (LocalGet (nr 0l)) in
+        get_v ^^ BigNum.from_signed_word64 env
+      )
+    ) in
+    E.add_export env (nr {
+      name = Lib.Utf8.decode "int_from_i64";
+      edesc = nr (FuncExport (nr int_from_i64_fi))
+    });
+
+    (* Traps *)
+
+    let bigint_trap_fi = E.add_fun env "bigint_trap" (
+      Func.of_body env [] [] (fun env ->
+        E.trap_with env "bigint function error"
+      )
+    ) in
+    E.add_export env (nr {
+      name = Lib.Utf8.decode "bigint_trap";
+      edesc = nr (FuncExport (nr bigint_trap_fi))
+    });
+
+    (* Keep a memory reserve when in update or init state.
+       This reserve can be used by queries, composite queries, and upgrades. *)
+    let keep_memory_reserve_fi = E.add_fun env "keep_memory_reserve" (
+      Func.of_body env [] [I32Type] (fun env ->
+        Lifecycle.get env ^^
+        compile_eq_const Lifecycle.(int_of_state InUpdate) ^^
+        Lifecycle.get env ^^
+        compile_eq_const Lifecycle.(int_of_state InInit) ^^
+        G.i (Binary (Wasm_exts.Values.I64 I64Op.Or)) ^^
+        Bool.to_rts_int32
+      )
+    ) in
+    E.add_export env (nr {
+      name = Lib.Utf8.decode "keep_memory_reserve";
+      edesc = nr (FuncExport (nr keep_memory_reserve_fi))
+    });
+
+    let rts_trap_fi = E.add_fun env "rts_trap" (
+      (* `libc` still uses 32-bit length parameter for `rts_trap` *)
+      Func.of_body env ["str", I64Type; "len", I32Type] [] (fun env ->
+        let get_str = G.i (LocalGet (nr 0l)) in
+        let get_len = G.i (LocalGet (nr 1l)) in
+        get_str ^^
+        get_len ^^
+        G.i (Convert (Wasm_exts.Values.I64 I64Op.ExtendUI32)) ^^
+        IC.trap_ptr_len env
+      )
+    ) in
+    E.add_export env (nr {
+      name = Lib.Utf8.decode "rts_trap";
+      edesc = nr (FuncExport (nr rts_trap_fi))
+    });
+
+    (* Under WASI there is no IC system API, so provide a trapping stub. *)
+    let ic0_performance_counter_fi =
+      if E.mode env = Flags.WASIMode then
+        E.add_fun env "ic0_performance_counter" (
+            Func.of_body env ["number", I32Type] [I64Type]
+              (fun env ->
+                E.trap_with env "ic0_performance_counter is not supposed to be called in WASI"
+              )
+          )
+      else E.reuse_import env "ic0" "performance_counter" in
+    E.add_export env (nr {
+      name = Lib.Utf8.decode "ic0_performance_counter";
+      edesc = nr (FuncExport (nr ic0_performance_counter_fi))
+    });
+
+    (* NOTE(review): a second, duplicate definition and export of
+       "keep_memory_reserve" was removed here. Export names must be unique
+       within a Wasm module, and `E.add_fun` would have registered the
+       function body twice. The first definition above is retained. *)
+
+    (* Helper: emit [code] only when stable memory support is required;
+       otherwise emit an unconditional trap. *)
+    let when_stable_memory_required_else_trap env code =
+      if E.requires_stable_memory env then
+        code() else
+        E.trap_with env "unreachable" in
+
+    let ic0_stable64_write_fi =
+      match E.mode env with
+      | Flags.ICMode | Flags.RefMode ->
+        E.reuse_import env "ic0" "stable64_write"
+      | Flags.WASIMode | Flags.WasmMode ->
+        E.add_fun env "ic0_stable64_write" (
+          Func.of_body env ["offset", I64Type; "src", I64Type; "size", I64Type] []
+            (fun env ->
+              when_stable_memory_required_else_trap env (fun () ->
+               let get_offset = G.i (LocalGet (nr 0l)) in
+               let get_src = G.i (LocalGet (nr 1l)) in
+               let get_size = G.i (LocalGet (nr 2l)) in
+               get_offset ^^
+               get_src ^^
+               get_size ^^
+               StableMem.stable64_write env))
+          )
+    in
+    E.add_export env (nr {
+      name = Lib.Utf8.decode "ic0_stable64_write";
+      edesc = nr (FuncExport (nr ic0_stable64_write_fi))
+    });
+
+    let ic0_stable64_read_fi =
+      match E.mode env with
+      | Flags.ICMode | Flags.RefMode ->
+        E.reuse_import env "ic0" "stable64_read"
+      | Flags.WASIMode | Flags.WasmMode ->
+        E.add_fun env "ic0_stable64_read" (
+          Func.of_body env ["dst", I64Type; "offset", I64Type; "size", I64Type] []
+            (fun env ->
+              when_stable_memory_required_else_trap env (fun () ->
+              let get_dst = G.i (LocalGet (nr 0l)) in
+              let get_offset = G.i (LocalGet (nr 1l)) in
+              let get_size = G.i (LocalGet (nr 2l)) in
+              get_dst ^^
+              get_offset ^^
+              get_size ^^
+              StableMem.stable64_read env))
+          )
+    in
+    E.add_export env (nr {
+      name = Lib.Utf8.decode "ic0_stable64_read";
+      edesc = nr (FuncExport (nr ic0_stable64_read_fi))
+    });
+
+    let ic0_stable64_size_fi =
+      match E.mode env with
+      | Flags.ICMode | Flags.RefMode ->
+        E.reuse_import env "ic0" "stable64_size"
+      | Flags.WASIMode | Flags.WasmMode ->
+        E.add_fun env "ic0_stable64_size" (
+          Func.of_body env [] [I64Type]
+            (fun env ->
+              if E.requires_stable_memory env then
+                StableMem.stable64_size env
+              else
+                (* The RTS also checks the persistence mode on Wasi without stable memory support *)
+                compile_unboxed_const 0L
+            )
+          )
+    in
+    E.add_export env (nr {
+      name = Lib.Utf8.decode "ic0_stable64_size";
+      edesc = nr (FuncExport (nr ic0_stable64_size_fi))
+    });
+
+    let ic0_stable64_grow_fi =
+      match E.mode env with
+      | Flags.ICMode | Flags.RefMode ->
+        E.reuse_import env "ic0" "stable64_grow"
+      | Flags.WASIMode | Flags.WasmMode ->
+        E.add_fun env "ic0_stable64_grow" (
+          Func.of_body env ["newPages", I64Type] [I64Type]
+            (fun env ->
+              when_stable_memory_required_else_trap env (fun () ->
+                G.i (LocalGet (nr 0l)) ^^
+                StableMem.stable64_grow env))
+          )
+    in
+    E.add_export env (nr {
+      name = Lib.Utf8.decode "ic0_stable64_grow";
+      edesc = nr (FuncExport (nr ic0_stable64_grow_fi))
+    });
+
+    let moc_stable_mem_grow_fi =
+      E.add_fun env "moc_stable_mem_grow" (
+        Func.of_body env ["newPages", I64Type] [I64Type]
+          (fun env ->
+            when_stable_memory_required_else_trap env (fun () ->
+            G.i (LocalGet (nr 0l)) ^^
+            StableMem.grow env))
+        )
+    in
+    E.add_export env (nr {
+      name = Lib.Utf8.decode "moc_stable_mem_grow";
+      edesc = nr (FuncExport (nr moc_stable_mem_grow_fi))
+    });
+
+    let moc_stable_mem_get_size_fi =
+      E.add_fun env "moc_stable_mem_get_size" (
+        Func.of_body env [] [I64Type]
+          (fun env ->
+             when_stable_memory_required_else_trap env (fun () ->
+             StableMem.get_mem_size env))
+        )
+    in
+    E.add_export env (nr {
+      name = Lib.Utf8.decode "moc_stable_mem_get_size";
+      edesc = nr (FuncExport (nr moc_stable_mem_get_size_fi))
+    });
+
+    let moc_stable_mem_set_size_fi =
+      E.add_fun env "moc_stable_mem_set_size" (
+        Func.of_body env ["pages", I64Type] []
+          (fun env ->
+            match E.mode env with
+            | Flags.ICMode | Flags.RefMode ->
+               G.i (LocalGet (nr 0l)) ^^
+               StableMem.set_mem_size env
+            | _ ->
+               E.trap_with env "moc_stable_mem_set_size is not supposed to be called in WASI" (* improve me *)
+          )
+        )
+    in
+    E.add_export env (nr {
+      name = Lib.Utf8.decode "moc_stable_mem_set_size";
+      edesc = nr (FuncExport (nr moc_stable_mem_set_size_fi))
+    });
+
+    let moc_stable_mem_get_version_fi =
+      E.add_fun env "moc_stable_mem_get_version" (
+        Func.of_body env [] [I64Type]
+          (fun env ->
+             StableMem.get_version env)
+        )
+    in
+    E.add_export env (nr {
+      name = Lib.Utf8.decode "moc_stable_mem_get_version";
+      edesc = nr (FuncExport (nr moc_stable_mem_get_version_fi))
+    });
+
+    let moc_stable_mem_set_version_fi =
+      E.add_fun env "moc_stable_mem_set_version" (
+        Func.of_body env ["version", I64Type] []
+          (fun env ->
+             G.i (LocalGet (nr 0l)) ^^
+             StableMem.set_version env
+          )
+        )
+    in
+    E.add_export env (nr {
+      name = Lib.Utf8.decode "moc_stable_mem_set_version";
+      edesc = nr (FuncExport (nr moc_stable_mem_set_version_fi))
+    });
+
+    (* Export the built-in IDL value-limit check for the RTS deserializer. *)
+    E.add_export env (nr {
+        name = Lib.Utf8.decode "idl_limit_check";
+        edesc = nr (FuncExport (nr (E.built_in env "idl_limit_check")))
+      })
+
+end (* RTS_Exports *)
+
+module Serialization = struct
+  (*
+    The general serialization strategy is as follows:
+    * We statically generate the IDL type description header.
+    * We traverse the data to calculate the size needed for the data buffer and the
+      reference buffer.
+    * We allocate memory for the data buffer and the reference buffer
+      (this memory area is not referenced, so will be dead with the next GC)
+    * We copy the IDL type header to the data buffer.
+    * We traverse the data and serialize it into the data buffer.
+      This is type driven, and we use the `share_code` machinery and names that
+      properly encode the type to resolve loops in a convenient way.
+    * We externalize all that new data space into a databuf
+    * We externalize the reference space into a elembuf
+    * We pass both databuf and elembuf to shared functions
+      (this mimics the future system API)
+
+    The deserialization is analogous:
+    * We allocate some scratch space, and internalize the databuf and elembuf into it.
+    * We parse the data, in a type-driven way, using normal construction and
+      allocation, while keeping tabs on the type description header for subtyping.
+    * At the end, the scratch space is a hole in the heap, and will be reclaimed
+      by the next GC.
+  *)
+
+  module Strm = struct
+    (* Low-level writer combinators over the serialization data buffer.
+       The "stream token" is the current write pointer, threaded through
+       the [get_data_buf]/[set_data_buf] accessors supplied by the caller. *)
+
+    (* Creates a fresh stream with header, storing stream token. *)
+    let create env get_data_size set_data_buf get_data_buf header =
+      let header_size = Int64.of_int (String.length header) in
+      get_data_size ^^ compile_add_const header_size ^^
+      Blob.dyn_alloc_scratch env ^^ set_data_buf ^^
+      get_data_buf ^^
+      Blob.lit env Tagged.B header ^^ Blob.payload_ptr_unskewed env ^^
+      compile_unboxed_const header_size ^^
+      Heap.memcpy env ^^
+      get_data_buf ^^ compile_add_const header_size ^^ set_data_buf
+
+    (* Checks the stream's filling, traps if unexpected.
+       NOTE(review): compares [data_buf + data_size] against a value the
+       caller must already have pushed on the stack — verify at call sites. *)
+    let check_filled env get_data_buf get_data_size =
+      get_data_buf ^^ get_data_size ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.Add)) ^^
+      compile_comparison I64Op.Eq ^^
+      E.else_trap_with env "data buffer not filled"
+
+    (* Finishes the stream, performing consistency checks. 
+      Returns payload address and size including the header. *)
+    let terminate env get_data_buf get_data_size header_size =
+      get_data_buf ^^ compile_sub_const header_size ^^
+      get_data_size ^^ compile_add_const header_size
+
+    (* Builds a unique name for a name seed and a type. *)
+    let name_for fn_name ts = "@" ^ fn_name ^ "<" ^ Typ_hash.typ_seq_hash ts ^ ">"
+
+    (* Advances the write pointer by the byte count currently on the stack. *)
+    let advance_data_buf get_data_buf =
+      get_data_buf ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.Add)) ^^ G.setter_for get_data_buf
+
+    (* Pushes the stream's current absolute byte offset on stack.
+      The requirement is that the difference between two uses
+      of this method must give a correct _relative_ offset. *)
+    let absolute_offset _env get_data_buf = get_data_buf
+
+    (* Opportunity to flush or update the token. Stream token is on stack. *)
+    let checkpoint _env get_data_buf = G.setter_for get_data_buf
+
+    (* Reserve a small fixed number of bytes in the stream and return an
+       address to it. The address is invalidated by a GC, and as such must
+       be written to in the next few instructions. *)
+    let reserve _env get_data_buf bytes =
+      get_data_buf ^^ get_data_buf ^^ compile_add_const bytes ^^ G.setter_for get_data_buf
+
+    (* Writes [code]'s result as an unsigned LEB128 word and advances. *)
+    let write_word_leb env get_data_buf code =
+      let set_word, get_word = new_local env "word" in
+      code ^^ set_word ^^
+      I32Leb.compile_store_to_data_buf_unsigned env get_word get_data_buf ^^
+      advance_data_buf get_data_buf
+
+    (* Writes [code]'s result as a fixed-width little-endian 32-bit word. *)
+    let write_word_32 env get_data_buf code =
+      let word32_size = 4L in
+      get_data_buf ^^ code ^^ G.i (Convert (Wasm_exts.Values.I32 I32Op.WrapI64)) ^^
+      G.i (Store {ty = I32Type; align = 0; offset = 0L; sz = None}) ^^
+      compile_unboxed_const word32_size ^^ 
+      advance_data_buf get_data_buf
+
+    (* Writes the low byte of [code]'s result and advances by one. *)
+    let write_byte _env get_data_buf code =
+      get_data_buf ^^ code ^^ G.i (Convert (Wasm_exts.Values.I32 I32Op.WrapI64)) ^^    
+      G.i (Store {ty = I32Type; align = 0; offset = 0L; sz = Some Wasm_exts.Types.Pack8}) ^^
+      compile_unboxed_const 1L ^^ advance_data_buf get_data_buf
+
+    (* Writes a blob as LEB128 length followed by its raw payload bytes. *)
+    let write_blob env get_data_buf get_x =
+      let set_len, get_len = new_local env "len" in
+      get_x ^^ Blob.len env ^^ set_len ^^
+      write_word_leb env get_data_buf get_len ^^
+      get_data_buf ^^
+      get_x ^^ Blob.payload_ptr_unskewed env ^^
+      get_len ^^
+      Heap.memcpy env ^^
+      get_len ^^ advance_data_buf get_data_buf
+
+    (* Writes a text value as LEB128 byte length followed by UTF-8 bytes. *)
+    let write_text env get_data_buf get_x =
+      let set_len, get_len = new_local env "len" in
+      get_x ^^ Text.size env ^^ set_len ^^
+      write_word_leb env get_data_buf get_len ^^
+      get_x ^^ get_data_buf ^^ Text.to_buf env ^^
+      get_len ^^ advance_data_buf get_data_buf
+
+    (* Writes a bignum in unsigned LEB128 encoding. *)
+    let write_bignum_leb env get_data_buf get_x =
+      get_data_buf ^^
+      get_x ^^
+      BigNum.compile_store_to_data_buf_unsigned env ^^
+      advance_data_buf get_data_buf
+
+    (* Writes a bignum in signed (S)LEB128 encoding. *)
+    let write_bignum_sleb env get_data_buf get_x =
+      get_data_buf ^^
+      get_x ^^
+      BigNum.compile_store_to_data_buf_signed env ^^
+      advance_data_buf get_data_buf
+  end (* Strm *)
+  (* Globals recording known Candid types
+    See Note [Candid subtype checks]
+  *)
+
+  (* Registers the three delayed 64-bit globals holding the lengths of the
+     Candid type-descriptor segments; returns their setter triple so the
+     lengths can be patched in once the segments are materialized. *)
+  let register_delayed_globals env =
+    (E.add_global64_delayed env "__candid_data_length" Immutable,
+    E.add_global64_delayed env "__type_offsets_length" Immutable,
+    E.add_global64_delayed env "__idl_types_length" Immutable)
+
+  let get_candid_data_length env =
+    G.i (GlobalGet (nr (E.get_global env "__candid_data_length")))
+  let get_type_offsets_length env =
+    G.i (GlobalGet (nr (E.get_global env "__type_offsets_length")))
+  let get_idl_types_length env =
+    G.i (GlobalGet (nr (E.get_global env "__idl_types_length")))
+
+  (* Each type offset is stored as a 64-bit value (8 bytes). *)
+  let candid_type_offset_size = 8L
+
+  (* Returns the global type descriptor; must only be called after
+     [reserve_global_type_descriptor] has populated it. *)
+  let get_global_type_descriptor env =
+    match !(E.(env.global_type_descriptor)) with
+    | Some descriptor -> descriptor
+    | None -> assert false
+
+  (* Shares a blob backed by the candid-data segment of the descriptor. *)
+  let get_global_candid_data env =
+    Tagged.share env (fun env -> 
+      let descriptor = get_global_type_descriptor env in
+      Blob.load_data_segment env Tagged.B E.(descriptor.candid_data_segment) (get_candid_data_length env)
+    )
+
+  (* Shares a blob backed by the type-offsets segment of the descriptor. *)
+  let get_global_type_offsets env =
+    Tagged.share env (fun env -> 
+      let descriptor = get_global_type_descriptor env in
+      Blob.load_data_segment env Tagged.B E.(descriptor.type_offsets_segment) (get_type_offsets_length env)
+    )
+
+  (* Shares a blob backed by the idl-types segment of the descriptor. *)
+  let get_global_idl_types env =
+    Tagged.share env (fun env -> 
+      let descriptor = get_global_type_descriptor env in
+      Blob.load_data_segment env Tagged.B E.(descriptor.idl_types_segment) (get_idl_types_length env)
+    )
+      
+  module Registers = struct
+    (* Mutable 64-bit globals used as registers by the (de)serializer.
+       They are cleared between GC increments (see [clear_registers]). *)
+
+    (* interval for checking instruction counter *)
+    let idl_value_numerator = 1L
+    let idl_value_denominator = 1L
+    let idl_value_bias = 1024L
+
+    let register_globals env =
+      E.add_global64 env "@@rel_buf_opt" Mutable 0L;
+      E.add_global64 env "@@data_buf" Mutable 0L;
+      E.add_global64 env "@@ref_buf" Mutable 0L;
+      E.add_global64 env "@@typtbl" Mutable 0L;
+      E.add_global64 env "@@typtbl_end" Mutable 0L;
+      E.add_global64 env "@@typtbl_size" Mutable 0L;
+      E.add_global64 env "@@global_typtbl" Mutable 0L;
+      E.add_global64 env "@@global_typtbl_end" Mutable 0L;
+      E.add_global64 env "@@global_typtbl_size" Mutable 0L;
+      E.add_global64 env "@@value_denominator" Mutable idl_value_denominator;
+      E.add_global64 env "@@value_numerator" Mutable idl_value_numerator;
+      E.add_global64 env "@@value_bias" Mutable idl_value_bias;
+      E.add_global64 env "@@value_quota" Mutable 0L
+
+    (* Getter/setter pairs for each register global. *)
+    let get_rel_buf_opt env =
+      G.i (GlobalGet (nr (E.get_global env "@@rel_buf_opt")))
+    let set_rel_buf_opt env =
+      G.i (GlobalSet (nr (E.get_global env "@@rel_buf_opt")))
+
+    let get_data_buf env =
+      G.i (GlobalGet (nr (E.get_global env "@@data_buf")))
+    let set_data_buf env =
+      G.i (GlobalSet (nr (E.get_global env "@@data_buf")))
+
+    let get_ref_buf env =
+      G.i (GlobalGet (nr (E.get_global env "@@ref_buf")))
+    let set_ref_buf env =
+      G.i (GlobalSet (nr (E.get_global env "@@ref_buf")))
+
+    let get_typtbl env =
+      G.i (GlobalGet (nr (E.get_global env "@@typtbl")))
+    let set_typtbl env =
+      G.i (GlobalSet (nr (E.get_global env "@@typtbl")))
+
+    let get_typtbl_end env =
+      G.i (GlobalGet (nr (E.get_global env "@@typtbl_end")))
+    let set_typtbl_end env =
+      G.i (GlobalSet (nr (E.get_global env "@@typtbl_end")))
+
+    let get_typtbl_size env =
+      G.i (GlobalGet (nr (E.get_global env "@@typtbl_size")))
+    let set_typtbl_size env =
+      G.i (GlobalSet (nr (E.get_global env "@@typtbl_size")))
+
+    let get_global_typtbl env =
+      G.i (GlobalGet (nr (E.get_global env "@@global_typtbl")))
+    let set_global_typtbl env =
+      G.i (GlobalSet (nr (E.get_global env "@@global_typtbl")))
+
+    let get_global_typtbl_end env =
+      G.i (GlobalGet (nr (E.get_global env "@@global_typtbl_end")))
+    let set_global_typtbl_end env =
+      G.i (GlobalSet (nr (E.get_global env "@@global_typtbl_end")))
+
+    let get_global_typtbl_size env =
+      G.i (GlobalGet (nr (E.get_global env "@@global_typtbl_size")))
+    let set_global_typtbl_size env =
+      G.i (GlobalSet (nr (E.get_global env "@@global_typtbl_size")))
+
+    (* Used as safety guard that no temporary pointers remain in the registers across GC increments. *)
+    let clear_registers env =
+      compile_unboxed_const 0L ^^ set_rel_buf_opt env ^^
+      compile_unboxed_const 0L ^^ set_data_buf env ^^
+      compile_unboxed_const 0L ^^ set_ref_buf env ^^
+      compile_unboxed_const 0L ^^ set_typtbl env ^^
+      compile_unboxed_const 0L ^^ set_typtbl_end env ^^
+      compile_unboxed_const 0L ^^ set_typtbl_size env ^^ (* also reset for symmetry, even if no pointer *)
+      compile_unboxed_const 0L ^^ set_global_typtbl env ^^
+      compile_unboxed_const 0L ^^ set_global_typtbl_end env ^^
+      compile_unboxed_const 0L ^^ set_global_typtbl_size env (* also reset for symmetry, even if no pointer *)
+
+    let get_value_quota env =
+      G.i (GlobalGet (nr (E.get_global env "@@value_quota")))
+    let set_value_quota env =
+      G.i (GlobalSet (nr (E.get_global env "@@value_quota")))
+
+    let get_value_numerator env =
+      G.i (GlobalGet (nr (E.get_global env "@@value_numerator")))
+    let set_value_numerator env =
+      G.i (GlobalSet (nr (E.get_global env "@@value_numerator")))
+
+    let get_value_denominator env =
+      G.i (GlobalGet (nr (E.get_global env "@@value_denominator")))
+    let set_value_denominator env =
+      G.i (GlobalSet (nr (E.get_global env "@@value_denominator")))
+
+    let get_value_bias env =
+      G.i (GlobalGet (nr (E.get_global env "@@value_bias")))
+    let set_value_bias env =
+      G.i (GlobalSet (nr (E.get_global env "@@value_bias")))
+
+    (* Recomputes the value quota as
+       [blob_len * numerator / denominator + bias], with the multiplication
+       and the final addition saturated at u64::MAX on overflow.
+       Only performed for plain Candid deserialization (rel_buf_opt set);
+       extended candid/destabilization applies no quota. *)
+    let reset_value_limit env get_blob get_rel_buf_opt =
+      get_rel_buf_opt ^^
+      E.if0
+      begin (* Candid deserialization *)
+        (* Set instruction limit *)
+        let (set_product, get_product) = new_local env "product" in
+        let (set_len, get_len) = new_local env "len" in
+        get_blob ^^
+        Blob.len env ^^
+        set_len ^^
+
+        get_len ^^
+        get_value_numerator env ^^
+        G.i (Binary (Wasm_exts.Values.I64 I64Op.Mul)) ^^
+        get_value_denominator env ^^
+        G.i (Binary (Wasm_exts.Values.I64 I64Op.DivU)) ^^
+        set_product ^^
+
+        get_value_numerator env ^^ (* check overflow for non-zero numerator *)
+        (E.if0 begin
+          (* Saturate multiplication `len * idl_value_numerator` on overflow.
+             Ignore `idl_value_denominator` on overflow. *)  
+          compile_unboxed_const (-1L) ^^ (* u64::MAX *)
+          get_value_numerator env ^^ (* non-zero! *)
+          G.i (Binary (Wasm_exts.Values.I64 I64Op.DivU)) ^^
+          get_len ^^
+          compile_comparison I64Op.LtU ^^
+          (E.if0 begin
+            compile_unboxed_const (-1L) ^^ (* u64::MAX *)
+            set_product
+          end
+            G.nop)
+        end
+          G.nop) ^^
+
+        get_product ^^
+        get_value_bias env ^^
+        G.i (Binary (Wasm_exts.Values.I64 I64Op.Add)) ^^
+        set_value_quota env ^^
+
+        (* Saturate value_quota on overflow *)
+        get_value_quota env ^^
+        get_product ^^
+        compile_comparison I64Op.LtU ^^
+        E.if0 begin
+          compile_unboxed_const (-1L) ^^ (* u64::MAX *)
+          set_value_quota env
+        end
+          G.nop
+      end
+      begin (* Extended candid/ Destabilization *)
+        G.nop
+      end
+
+    (* Built-in that traps when the remaining value quota is exceeded;
+       optionally (first arg non-zero) decrements the quota by the count. *)
+    let define_idl_limit_check env =
+      Func.define_built_in env "idl_limit_check"
+        [("decrement", I32Type); ("count", I64Type)] [] (fun env ->
+        get_rel_buf_opt env ^^
+        E.if0 begin (* Candid deserialization *)
+          get_value_quota env ^^
+          G.i (LocalGet (nr 1l)) ^^ (* Count of values *)
+          compile_comparison I64Op.LtU ^^
+          E.then_trap_with env "IDL error: exceeded value limit" ^^
+          (* if (decrement) quota -= count *)
+          G.i (LocalGet (nr 0l)) ^^
+          Bool.from_rts_int32 ^^
+          E.if0 begin
+             get_value_quota env ^^
+             G.i (LocalGet (nr 1l)) ^^
+             G.i (Binary (Wasm_exts.Values.I64 I64Op.Sub)) ^^
+             set_value_quota env
+           end
+             G.nop
+        end begin (* Extended Candid/Destabilization *)
+          G.nop
+        end)
+
+    let idl_limit_check env =
+      G.i (Call (nr (E.built_in env "idl_limit_check")))
+
+  end
+
+  open Typ_hash
+
+  (* Pairs each field with the Candid hash of its (unescaped) label and
+     returns the pairs ordered by ascending hash, as the Candid wire
+     format requires. *)
+  let sort_by_hash fs =
+    fs
+    |> List.map (fun f -> (Idllib.Escape.unescape_hash f.Type.lab, f))
+    |> List.sort (fun (h1, _) (h2, _) -> Lib.Uint32.compare h1 h2)
+
+  (* The IDL serialization prefaces the data with a type description.
+     We can statically create the type description in Ocaml code,
+     store it in the program, and just copy it to the beginning of the message.
+
+     At some point this can be factored into a function from Motoko type to IDL
+     type, and a function like this for IDL types. But due to recursion handling
+     it is easier to start like this.
+  *)
+
+  module TM = Map.Make (Type.Ord)
+
+  (* Whether we encode for the Candid wire format or for the persistence
+     (memory-compatibility) type descriptor. *)
+  type mode =
+    | Candid
+    | Persistence
+
+  (* Maps a Motoko type to the (positive) opcode of its primitive IDL type,
+     or [None] if the type is not primitive in IDL (it is then assigned a
+     type-table index instead; see [type_desc]). *)
+  let to_idl_prim mode = let open Type in function
+    | Prim Null | Tup [] -> Some 1l
+    | Prim Bool -> Some 2l
+    | Prim Nat -> Some 3l
+    | Prim Int -> Some 4l
+    | Prim Nat8 -> Some 5l
+    | Prim Nat16 -> Some 6l
+    | Prim (Nat32|Char) -> Some 7l
+    | Prim Nat64 -> Some 8l
+    | Prim Int8 -> Some 9l
+    | Prim Int16 -> Some 10l
+    | Prim Int32 -> Some 11l
+    | Prim Int64 -> Some 12l
+    | Prim Float -> Some 14l
+    | Prim Text -> Some 15l
+    (* NB: Prim Blob does not map to a primitive IDL type *)
+    | Any -> Some 16l
+    | Non -> Some 17l
+    | Prim Principal -> Some 24l
+    | Prim Region -> Some 128l
+    (* only used for memory compatibility checks *)
+    | Prim Blob -> 
+      (match mode with
+      | Candid -> None
+      | Persistence -> Some 129l)
+    | _ -> None
+
+  (* some constants, also see rts/idl.c *)
+  let idl_opt       = -18l
+  let idl_vec       = -19l
+  let idl_record    = -20l
+  let idl_variant   = -21l
+  let idl_func      = -22l
+  let idl_service   = -23l
+  let idl_alias     = 1l (* see Note [mutable stable values] *)
+  
+  (* only used for memory compatibility checks *)
+  let idl_tuple     = -130l
+
+  (* TODO: use record *)
+  (* TODO: use record *)
+  (* Statically builds the IDL type-description header for the types [ts]:
+     returns the binary descriptor, the byte offsets of each type-table
+     entry within it, and the (possibly negative, i.e. primitive) type
+     indices of [ts] themselves. *)
+  let type_desc env mode ts :
+     string * int list * int32 list  (* type_desc, (relative offsets), indices of ts *)
+    =
+    let open Type in
+
+    (* Type traversal *)
+    (* We do a first traversal to find out the indices of non-primitive types *)
+    let (typs, idx) =
+      let typs = ref [] in
+      let idx = ref TM.empty in
+      let rec go t =
+        let t = Type.normalize t in
+        if to_idl_prim mode t <> None then () else
+        if TM.mem t !idx then () else begin
+          idx := TM.add t (Lib.List32.length !typs) !idx;
+          typs := !typs @ [ t ];
+          match t with
+          | Tup ts -> List.iter go ts
+          | Obj (_, fs) ->
+            List.iter (fun f -> go f.typ) fs
+          | Array (Mut t) -> go (Array t)
+          | Array t -> go t
+          | Opt t -> go t
+          | Variant vs -> List.iter (fun f -> go f.typ) vs
+          | Func (s, c, tbs, ts1, ts2) ->
+            List.iter go ts1; List.iter go ts2
+          | Prim Blob -> ()
+          | Mut t -> go t
+          | _ ->
+            Printf.eprintf "type_desc: unexpected type %s\n" (string_of_typ t);
+            assert false
+        end
+      in
+      List.iter go ts;
+      (!typs, !idx)
+    in
+
+    (* buffer utilities *)
+    let buf = Buffer.create 16 in
+
+    let add_u8 i =
+      Buffer.add_char buf (Char.chr (i land 0xff)) in
+
+    (* Unsigned LEB128 encoding of a 32-bit value. *)
+    let rec add_leb128_32 (i : Lib.Uint32.t) =
+      let open Lib.Uint32 in
+      let b = logand i (of_int32 0x7fl) in
+      if of_int32 0l <= i && i < of_int32 128l
+      then add_u8 (to_int b)
+      else begin
+        add_u8 (to_int (logor b (of_int32 0x80l)));
+        add_leb128_32 (shift_right_logical i 7)
+      end in
+
+    let add_leb128 i =
+      assert (i >= 0);
+      add_leb128_32 (Lib.Uint32.of_int i) in
+
+    (* Signed LEB128 encoding of a 32-bit value. *)
+    let rec add_sleb128 (i : int32) =
+      let open Int32 in
+      let b = logand i 0x7fl in
+      if -64l <= i && i < 64l
+      then add_u8 (to_int b)
+      else begin
+        add_u8 (to_int (logor b 0x80l));
+        add_sleb128 (shift_right i 7)
+      end in
+
+    (* Actual binary data *)
+
+    (* Writes a type reference: negated primitive opcode or table index. *)
+    let add_idx t =
+      let t = Type.normalize t in
+      match to_idl_prim mode t with
+      | Some i -> add_sleb128 (Int32.neg i)
+      | None -> add_sleb128 (TM.find (normalize t) idx) in
+
+    let idx t =
+      let t = Type.normalize t in
+      match to_idl_prim mode t with
+      | Some i -> Int32.neg i
+      | None -> TM.find (normalize t) idx in
+
+    (* Writes one type-table entry for a non-primitive type. *)
+    let rec add_typ t =
+      match t with
+      | Non -> assert false
+      | Prim Blob ->
+        assert (mode = Candid);
+        add_typ Type.(Array (Prim Nat8))
+      | Prim Region ->
+        add_sleb128 idl_alias; add_idx t
+      | Prim _ -> assert false
+      | Tup ts ->
+        add_sleb128 (match mode with
+        | Candid -> idl_record
+        | Persistence -> idl_tuple);
+        add_leb128 (List.length ts);
+        List.iteri (fun i t ->
+          add_leb128 i;
+          add_idx t;
+        ) ts
+      | Obj ((Object | Memory), fs) ->
+        add_sleb128 idl_record;
+        add_leb128 (List.length fs);
+        List.iter (fun (h, f) ->
+          add_leb128_32 h;
+          add_idx f.typ
+        ) (sort_by_hash fs)
+      | Array (Mut t) ->
+        add_sleb128 idl_alias; add_idx (Array t)
+      | Array t ->
+        add_sleb128 idl_vec; add_idx t
+      | Opt t ->
+        add_sleb128 idl_opt; add_idx t
+      | Variant vs ->
+        add_sleb128 idl_variant;
+        add_leb128 (List.length vs);
+        List.iter (fun (h, f) ->
+          add_leb128_32 h;
+          add_idx f.typ
+        ) (sort_by_hash vs)
+      | Func (s, c, tbs, ts1, ts2) ->
+        assert (Type.is_shared_sort s);
+        add_sleb128 idl_func;
+        add_leb128 (List.length ts1);
+        List.iter add_idx ts1;
+        add_leb128 (List.length ts2);
+        List.iter add_idx ts2;
+        begin match s, c with
+          | _, Returns ->
+            add_leb128 1; add_u8 2; (* oneway *)
+          | Shared Write, _ ->
+            add_leb128 0; (* no annotation *)
+          | Shared Query, _ ->
+            add_leb128 1; add_u8 1; (* query *)
+          | Shared Composite, _ ->
+            add_leb128 1; add_u8 3; (* composite *)
+          | _ -> assert false
+        end
+      | Obj (Actor, fs) ->
+        add_sleb128 idl_service;
+        add_leb128 (List.length fs);
+        List.iter (fun f ->
+          add_leb128 (String.length f.lab);
+          Buffer.add_string buf f.lab;
+          add_idx f.typ
+        ) fs
+      | Mut t ->
+        add_sleb128 idl_alias; add_idx t
+      | _ -> assert false in
+
+    (* Magic, type table, then the indices of the argument types. *)
+    Buffer.add_string buf "DIDL";
+    add_leb128 (List.length typs);
+    let offsets = List.map (fun typ ->
+      let offset = Buffer.length buf in
+      add_typ typ;
+      offset)
+      typs
+    in
+    add_leb128 (List.length ts);
+    List.iter add_idx ts;
+    (Buffer.contents buf,
+     offsets,
+     List.map idx ts)
+
+  (* See Note [Candid subtype checks] *)
+  (* See Note [Candid subtype checks] *)
+  (* Reserves three empty data segments for the global type descriptor;
+     their contents are filled in later by [create_global_type_descriptor]. *)
+  let reserve_global_type_descriptor (env : E.t) =
+    let candid_data_segment = E.add_data_segment env ""​ in
+    let type_offsets_segment = E.add_data_segment env "" in
+    let idl_types_segment = E.add_data_segment env "" in
+    E.(env.global_type_descriptor := Some {
+      candid_data_segment;
+      type_offsets_segment;
+      idl_types_segment;
+    })
+
+  (* Materializes the reserved segments from [type_desc] over the program's
+     full type table and patches the three delayed length globals. *)
+  let create_global_type_descriptor (env : E.t) (set_candid_data_length, set_type_offsets_length, set_idl_types_length) =
+    let descriptor = get_global_type_descriptor env in
+    let candid_data, type_offsets, idl_types = type_desc env Candid (E.get_typtbl_typs env) in
+    let candid_data_binary = [StaticBytes.Bytes candid_data] in
+    let candid_data_length = E.replace_data_segment env E.(descriptor.candid_data_segment) candid_data_binary in
+    set_candid_data_length candid_data_length;
+    let type_offsets_binary = [StaticBytes.i64s (List.map Int64.of_int type_offsets)] in
+    let type_offsets_length = E.replace_data_segment env E.(descriptor.type_offsets_segment) type_offsets_binary in
+    set_type_offsets_length type_offsets_length;
+    let idl_types_64 = List.map Wasm.I64_convert.extend_i32_u idl_types in
+    let idl_types_binary = [StaticBytes.i64s idl_types_64] in
+    let idl_types_length = E.replace_data_segment env E.(descriptor.idl_types_segment) idl_types_binary in
+    set_idl_types_length idl_types_length
+
+  (* Returns data (in bytes) and reference buffer size (in entries) needed *)
+  (* Generates (and shares, keyed by type hash) a function that walks the
+     value `x` of type `t` and returns two I64s: the number of data bytes
+     and the number of reference entries its serialization will occupy. *)
+  let rec buffer_size env t =
+    let open Type in
+    let t = Type.normalize t in
+    let name = "@buffer_size<" ^ typ_hash t ^ ">" in
+    Func.share_code1 Func.Always env name ("x", I64Type) [I64Type; I64Type]
+    (fun env get_x ->
+
+      (* Some combinators for writing values *)
+      let (set_data_size, get_data_size) = new_local env "data_size" in
+      let (set_ref_size, get_ref_size) = new_local env "ref_size" in
+      compile_unboxed_const 0L ^^ set_data_size ^^
+      compile_unboxed_const 0L ^^ set_ref_size ^^
+
+      (* Add the I64 produced by `code` to the running data size. *)
+      let inc_data_size code =
+        get_data_size ^^
+        code ^^
+        G.i (Binary (Wasm_exts.Values.I64 I64Op.Add)) ^^
+        set_data_size
+      in
+
+      (* Account for the LEB128 encoding size of the word produced by `code`. *)
+      let size_word env code =
+        let (set_word, get_word) = new_local env "word" in
+        code ^^ set_word ^^
+        inc_data_size (I32Leb.compile_leb128_size get_word)
+      in
+
+      (* Recurse into a nested value of type `t`, accumulating both its data
+         size and its reference count. *)
+      let size env t =
+        let (set_inc, get_inc) = new_local env "inc" in
+        buffer_size env t ^^
+        get_ref_size ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.Add)) ^^ set_ref_size ^^
+        set_inc ^^ inc_data_size get_inc
+      in
+
+      (* the incremental GC leaves array slice information in tag,
+         the slice information can be removed and the tag reset to array
+         as the GC can resume marking from the array beginning *)
+      let clear_array_slicing =
+        let (set_temp, get_temp) = new_local env "temp" in
+        set_temp ^^
+        get_temp ^^ compile_unboxed_const Tagged.(int_of_tag StableSeen) ^^
+        compile_comparison I64Op.Ne ^^
+        get_temp ^^ compile_unboxed_const Tagged.(int_of_tag CoercionFailure) ^^
+        compile_comparison I64Op.Ne ^^
+        G.i (Binary (Wasm_exts.Values.I64 I64Op.And)) ^^
+        get_temp ^^ compile_unboxed_const Tagged.(int_of_tag ArraySliceMinimum) ^^
+        compile_comparison I64Op.GeU ^^
+        G.i (Binary (Wasm_exts.Values.I64 I64Op.And)) ^^
+        E.if1 I64Type begin
+          (compile_unboxed_const Tagged.(int_of_tag (Array M)))
+        end begin
+          get_temp
+        end
+      in
+
+      let size_alias size_thing =
+        (* see Note [mutable stable values] *)
+        let (set_tag, get_tag) = new_local env "tag" in
+        get_x ^^ Tagged.load_tag env ^^ clear_array_slicing ^^ set_tag ^^
+        (* Sanity check *)
+        get_tag ^^ compile_eq_const Tagged.(int_of_tag StableSeen) ^^
+        get_tag ^^ compile_eq_const Tagged.(int_of_tag MutBox) ^^
+        G.i (Binary (Wasm_exts.Values.I64 I64Op.Or)) ^^
+        get_tag ^^ compile_eq_const Tagged.(int_of_tag (Array M)) ^^
+        G.i (Binary (Wasm_exts.Values.I64 I64Op.Or)) ^^
+        get_tag ^^ compile_eq_const Tagged.(int_of_tag Region) ^^
+        G.i (Binary (Wasm_exts.Values.I64 I64Op.Or)) ^^
+        E.else_trap_with env "object_size/Mut: Unexpected tag." ^^
+        (* Check if we have seen this before *)
+        get_tag ^^ compile_eq_const Tagged.(int_of_tag StableSeen) ^^
+        E.if0 begin
+          (* Seen before *)
+          (* One byte marker, one word offset *)
+          (* TODO: Support 64-bit pointer in serialization *)
+          inc_data_size (compile_unboxed_const 5L)
+        end begin
+          (* Not yet seen *)
+          (* One byte marker, two words scratch space *)
+          inc_data_size (compile_unboxed_const 9L) ^^
+          (* Mark it as seen *)
+          get_x ^^ Tagged.(store_tag env StableSeen) ^^
+          (* and descend *)
+          size_thing ()
+        end
+      in
+
+      (* Now the actual type-dependent code *)
+      begin match t with
+      | Prim Nat -> inc_data_size (get_x ^^ BigNum.compile_data_size_unsigned env)
+      | Prim Int -> inc_data_size (get_x ^^ BigNum.compile_data_size_signed env)
+      | Prim (Int8|Nat8) -> inc_data_size (compile_unboxed_const 1L)
+      | Prim (Int16|Nat16) -> inc_data_size (compile_unboxed_const 2L)
+      | Prim (Int32|Nat32|Char) -> inc_data_size (compile_unboxed_const 4L)
+      | Prim (Int64|Nat64|Float) -> inc_data_size (compile_unboxed_const 8L)
+      | Prim Bool -> inc_data_size (compile_unboxed_const 1L)
+      | Prim Null -> G.nop
+      | Any -> G.nop
+      | Tup [] -> G.nop (* e(()) = null *)
+      | Tup ts ->
+        G.concat_mapi (fun i t ->
+          get_x ^^ Tuple.load_n env (Int64.of_int i) ^^
+          size env t
+          ) ts
+      | Obj ((Object | Memory), fs) ->
+        (* Fields are sized in hash order, matching the candid field order *)
+        G.concat_map (fun (_h, f) ->
+          get_x ^^ Object.load_idx_raw env f.Type.lab ^^
+          size env f.typ
+          ) (sort_by_hash fs)
+      | Array (Mut t) ->
+        size_alias (fun () -> get_x ^^ size env (Array t))
+      | Array t ->
+        (* LEB128 length prefix, then each element *)
+        size_word env (get_x ^^ Arr.len env) ^^
+        get_x ^^ Arr.len env ^^
+        from_0_to_n env (fun get_i ->
+          get_x ^^ get_i ^^ Arr.unsafe_idx env ^^ load_ptr ^^
+          size env t
+        )
+      | Prim Blob ->
+        let (set_len, get_len) = new_local env "len" in
+        get_x ^^ Blob.len env ^^ set_len ^^
+        size_word env get_len ^^
+        inc_data_size get_len
+      | Prim Text ->
+        let (set_len, get_len) = new_local env "len" in
+        get_x ^^ Text.size env ^^ set_len ^^
+        size_word env get_len ^^
+        inc_data_size get_len
+      | Opt t ->
+        inc_data_size (compile_unboxed_const 1L) ^^ (* one byte tag *)
+        get_x ^^ Opt.is_some env ^^
+        E.if0 (get_x ^^ Opt.project env ^^ size env t) G.nop
+      | Variant vs ->
+        (* Try each alternative in hash order; trap if none matches *)
+        List.fold_right (fun (i, {lab = l; typ = t; _}) continue ->
+            get_x ^^
+            Variant.test_is env l ^^
+            E.if0
+              ( size_word env (compile_unboxed_const (Int64.of_int i)) ^^
+                get_x ^^ Variant.project env ^^ size env t
+              ) continue
+          )
+          ( List.mapi (fun i (_h, f) -> (i,f)) (sort_by_hash vs) )
+          ( E.trap_with env "buffer_size: unexpected variant" )
+      | Func _ ->
+        (* Function reference: actor (field 0) plus method name (field 1) *)
+        inc_data_size (compile_unboxed_const 1L) ^^ (* one byte tag *)
+        get_x ^^ Arr.load_field env 0L ^^ size env (Obj (Actor, [])) ^^
+        get_x ^^ Arr.load_field env 1L ^^ size env (Prim Text)
+      | Obj (Actor, _) | Prim Principal ->
+        inc_data_size (compile_unboxed_const 1L) ^^ (* one byte tag *)
+        get_x ^^ size env (Prim Blob)
+      | Non ->
+        E.trap_with env "buffer_size called on value of type None"
+      | Prim Region ->
+         size_alias (fun () ->
+          inc_data_size (compile_unboxed_const 12L) ^^ (* |id| + |page_count| = 8 + 4 *)
+          get_x ^^ Region.vec_pages env ^^ size env (Prim Blob))
+      | Mut t ->
+        size_alias (fun () -> get_x ^^ MutBox.load_field env ^^ size env t)
+      | _ -> todo "buffer_size" (Arrange_ir.typ t) G.nop
+      end ^^
+      (* Check 32-bit overflow of buffer_size *)
+      (* TODO: Support 64-bit buffer *)
+      get_data_size ^^
+      compile_shrU_const 32L ^^
+      compile_test I64Op.Eqz ^^
+      E.else_trap_with env "buffer_size overflow" ^^
+      (* Return (data_size, ref_size) *)
+      get_data_size ^^
+      get_ref_size
+    )
+
+  (* TODO: Change serialization version because format changes with 64-bit *)
+  (* Copies x to the data_buffer, storing references after ref_count entries in ref_base *)
+  (* Generates (and shares, per type) serialization code for a value `x` of
+     type `t`. Takes the current data and reference buffer positions and
+     returns both, advanced past whatever was written. *)
+  let rec serialize_go env t =
+    let open Type in
+    let t = Type.normalize t in
+    let name = Strm.name_for "serialize_go" [t] in
+    Func.share_code3 Func.Always env name (("x", I64Type), ("data_buffer", I64Type), ("ref_buffer", I64Type)) [I64Type; I64Type]
+    (fun env get_x get_data_buf get_ref_buf ->
+      let set_ref_buf = G.setter_for get_ref_buf in
+
+      (* Some combinators for writing values *)
+      let open Strm in
+
+      (* Recursively serialize a nested value of type `t`, updating the
+         reference buffer local and checkpointing the stream. *)
+      let write env t =
+        get_data_buf ^^
+        get_ref_buf ^^
+        serialize_go env t ^^
+        set_ref_buf ^^
+        checkpoint env get_data_buf
+      in
+
+      let write_alias write_thing =
+        (* see Note [mutable stable values] *)
+        (* Check heap tag *)
+        let (set_tag, get_tag) = new_local env "tag" in
+        get_x ^^ Tagged.load_tag env ^^ set_tag ^^
+        get_tag ^^ compile_eq_const Tagged.(int_of_tag StableSeen) ^^
+        E.if0
+        begin
+          (* This is the real data *)
+          write_byte env get_data_buf (compile_unboxed_const 0L) ^^
+          (* Remember the current offset in the tag word *)
+          get_x ^^ Tagged.load_forwarding_pointer env ^^ Strm.absolute_offset env get_data_buf ^^
+          Tagged.store_field env Tagged.tag_field ^^
+          (* Leave space in the output buffer for the decoder's bookkeeping *)
+          (* For compatibility, this is still in 32-bit format, but not useful in 64-bit *)
+          write_word_32 env get_data_buf (compile_unboxed_const 0L) ^^
+          write_word_32 env get_data_buf (compile_unboxed_const 0L) ^^
+          (* Now the data, following the object field mutbox indirection *)
+          write_thing ()
+        end
+        begin
+          (* This is a reference *)
+          write_byte env get_data_buf (compile_unboxed_const 1L) ^^
+          (* Sanity Checks *)
+          get_tag ^^ compile_eq_const Tagged.(int_of_tag MutBox) ^^
+          E.then_trap_with env "unvisited mutable data in serialize_go (MutBox)" ^^
+          get_tag ^^ compile_eq_const Tagged.(int_of_tag (Array M)) ^^
+          E.then_trap_with env "unvisited mutable data in serialize_go (Array)" ^^
+          get_tag ^^ compile_eq_const Tagged.(int_of_tag Region) ^^
+          E.then_trap_with env "unvisited mutable data in serialize_go (Region)" ^^
+          (* Second time we see this *)
+          (* Calculate relative offset *)
+          (* The tag word holds the absolute stream offset stored on first visit *)
+          let set_offset, get_offset = new_local env "offset" in
+          get_tag ^^ Strm.absolute_offset env get_data_buf ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.Sub)) ^^
+          set_offset ^^
+          (* A sanity check *)
+          get_offset ^^ compile_unboxed_const 0L ^^
+          compile_comparison I64Op.LtS ^^
+          E.else_trap_with env "Odd offset" ^^
+          (* TODO: Support serialization beyond 32-bit *)
+          get_offset ^^ compile_unboxed_const 0xffff_ffff_0000_0000L ^^
+          compile_comparison I64Op.GeS ^^
+          E.else_trap_with env "64-bit offsets not yet supported during serialization" ^^
+          (* Write the offset to the output buffer *)
+          write_word_32 env get_data_buf get_offset
+        end
+      in
+
+      (* Now the actual serialization *)
+
+      begin match t with
+      | Prim Nat ->
+        write_bignum_leb env get_data_buf get_x
+      | Prim Int ->
+        write_bignum_sleb env get_data_buf get_x
+      | Prim Float ->
+        reserve env get_data_buf 8L ^^
+        get_x ^^ Float.unbox env ^^
+        G.i (Store {ty = F64Type; align = 0; offset = 0L; sz = None})
+      | Prim ((Int64|Nat64) as pty) ->
+        reserve env get_data_buf 8L ^^
+        get_x ^^ BoxedWord64.unbox env pty ^^
+        G.i (Store {ty = I64Type; align = 0; offset = 0L; sz = None})
+      | Prim ((Int32|Nat32) as ty) ->
+        write_word_32 env get_data_buf (get_x ^^ TaggedSmallWord.lsb_adjust ty)
+      | Prim Char ->
+        write_word_32 env get_data_buf (get_x ^^ TaggedSmallWord.lsb_adjust_codepoint env)
+      | Prim ((Int16|Nat16) as ty) ->
+        reserve env get_data_buf 2L ^^
+        get_x ^^ TaggedSmallWord.lsb_adjust ty ^^ G.i (Convert (Wasm_exts.Values.I32 I32Op.WrapI64)) ^^
+        G.i (Store {ty = I32Type; align = 0; offset = 0L; sz = Some Wasm_exts.Types.Pack16})
+      | Prim ((Int8|Nat8) as ty) ->
+        write_byte env get_data_buf (get_x ^^ TaggedSmallWord.lsb_adjust ty)
+      | Prim Bool ->
+        write_byte env get_data_buf get_x
+      | Tup [] -> (* e(()) = null *)
+        G.nop
+      | Tup ts ->
+        G.concat_mapi (fun i t ->
+          get_x ^^ Tuple.load_n env (Int64.of_int i) ^^
+          write env t
+        ) ts
+      | Obj ((Object | Memory), fs) ->
+        (* Fields are written in hash order, mirroring buffer_size *)
+        G.concat_map (fun (_h, f) ->
+          get_x ^^ Object.load_idx_raw env f.Type.lab ^^
+          write env f.typ
+        ) (sort_by_hash fs)
+      | Array (Mut t) ->
+        write_alias (fun () -> get_x ^^ write env (Array t))
+      | Prim Region ->
+        write_alias (fun () ->
+          reserve env get_data_buf 8L ^^
+          get_x ^^ Region.id env ^^
+          G.i (Store {ty = I64Type; align = 0; offset = 0L; sz = None}) ^^
+          write_word_32 env get_data_buf (get_x ^^ Region.page_count env) ^^
+          write_blob env get_data_buf (get_x ^^ Region.vec_pages env)
+        )
+      | Array t ->
+        (* LEB128 length prefix, then each element *)
+        write_word_leb env get_data_buf (get_x ^^ Arr.len env) ^^
+        get_x ^^ Arr.len env ^^
+        from_0_to_n env (fun get_i ->
+          get_x ^^ get_i ^^ Arr.unsafe_idx env ^^ load_ptr ^^
+          write env t
+        )
+      | Prim Null -> G.nop
+      | Any -> G.nop
+      | Opt t ->
+        get_x ^^
+        Opt.is_some env ^^
+        E.if0
+          (write_byte env get_data_buf (compile_unboxed_const 1L) ^^ get_x ^^ Opt.project env ^^ write env t)
+          (write_byte env get_data_buf (compile_unboxed_const 0L))
+      | Variant vs ->
+        (* Try each alternative in hash order; trap if none matches *)
+        List.fold_right (fun (i, {lab = l; typ = t; _}) continue ->
+            get_x ^^
+            Variant.test_is env l ^^
+            E.if0
+              ( write_word_leb env get_data_buf (compile_unboxed_const (Int64.of_int i)) ^^
+                get_x ^^ Variant.project env ^^ write env t)
+              continue
+          )
+          ( List.mapi (fun i (_h, f) -> (i,f)) (sort_by_hash vs) )
+          ( E.trap_with env "serialize_go: unexpected variant" )
+      | Prim Blob ->
+        write_blob env get_data_buf get_x
+      | Prim Text ->
+        write_text env get_data_buf get_x
+      | Func _ ->
+        (* Function reference: actor (field 0) plus method name (field 1) *)
+        write_byte env get_data_buf (compile_unboxed_const 1L) ^^
+        get_x ^^ Arr.load_field env 0L ^^ write env (Obj (Actor, [])) ^^
+        get_x ^^ Arr.load_field env 1L ^^ write env (Prim Text)
+      | Obj (Actor, _) | Prim Principal ->
+        write_byte env get_data_buf (compile_unboxed_const 1L) ^^
+        get_x ^^ write env (Prim Blob)
+      | Non ->
+        E.trap_with env "serializing value of type None"
+      | Mut t ->
+        write_alias (fun () ->
+          get_x ^^ MutBox.load_field env ^^ write env t
+        )
+      | _ -> todo "serialize" (Arrange_ir.typ t) G.nop
+      end ^^
+      (* Return the advanced (data_buffer, ref_buffer) positions *)
+      get_data_buf ^^
+      get_ref_buf
+    )
+
+  (* This value is returned by deserialize_go if deserialization fails in a way
+     that should be recoverable by opt parsing.
+     It is an (invalid) sentinel pointer value (in skewed format) and can be used for pointer comparison.
+     It will be never placed on the heap and must not be dereferenced.
+     If unskewed, it refers to the unallocated last Wasm memory page.
+  *)
+  (* Note: `env` is unused in the body; kept so call sites pass it uniformly. *)
+  let coercion_error_value env = 0xffff_ffff_ffff_fffdL
+
+  (* See Note [Candid subtype checks] *)
+  (* Runs `f` with (a getter for) the subtype-check memoization buffer.
+     In the `extended` (trusted destabilization) case no buffer is needed
+     and `f` receives null (0); otherwise a buffer sized by the RTS
+     (`idl_sub_buf_words`, based on both type-table sizes) is allocated on
+     the RTS stack and initialized via `idl_sub_buf_init` before `f` runs. *)
+  let with_rel_buf_opt env extended get_typtbl_size1 get_typtbl_size2 f =
+    if extended then
+      f (compile_unboxed_const 0L)
+    else
+      get_typtbl_size1 ^^ get_typtbl_size2 ^^
+      E.call_import env "rts" "idl_sub_buf_words" ^^
+      Stack.dynamic_with_words env "rel_buf" (fun get_ptr ->
+        get_ptr ^^ get_typtbl_size1 ^^ get_typtbl_size2 ^^
+        E.call_import env "rts" "idl_sub_buf_init" ^^
+        f get_ptr)
+
+  (* See Note [Candid subtype checks] *)
+  (* Emits a subtype check between the idl type on the Wasm stack (idltyp1)
+     and the static Motoko type `t2`. `t2`'s idl type index is looked up in
+     the global idl-types segment using the index assigned by
+     `E.add_typtbl_typ`; the actual check is delegated to the RTS `idl_sub`,
+     which consumes the memoization buffer and both type tables held in
+     registers. Returns a boolean-like I64. *)
+  let idl_sub env t2 =
+    let idx = Wasm.I64_convert.extend_i32_u (E.add_typtbl_typ env t2) in
+    get_global_idl_types env ^^
+    Blob.payload_ptr_unskewed env ^^
+    G.i (Load {ty = I64Type; align = 0; offset = Int64.mul idx candid_type_offset_size (*!*); sz = None}) ^^
+    Func.share_code2 Func.Always env ("idl_sub")
+      (("idltyp1", I64Type),
+       ("idltyp2", I64Type)
+      )
+      [I64Type]
+      (fun env get_idltyp1 get_idltyp2 ->
+        (* The memoization buffer must have been set up (see with_rel_buf_opt) *)
+        Registers.get_rel_buf_opt env ^^
+        E.else_trap_with env "null rel_buf" ^^
+        Registers.get_rel_buf_opt env ^^
+        Registers.get_typtbl env ^^
+        Registers.get_global_typtbl env ^^
+        Registers.get_typtbl_end env ^^
+        Registers.get_global_typtbl_end env ^^
+        Registers.get_typtbl_size env ^^
+        Registers.get_global_typtbl_size env ^^
+        (* RTS idl_sub takes the two type indices as 32-bit values *)
+        get_idltyp1 ^^
+        G.i (Convert (Wasm_exts.Values.I32 I32Op.WrapI64)) ^^
+        get_idltyp2 ^^
+        G.i (Convert (Wasm_exts.Values.I32 I32Op.WrapI64)) ^^
+        E.call_import env "rts" "idl_sub" ^^
+        G.i (Convert (Wasm_exts.Values.I64 I64Op.ExtendUI32))
+        )
+
+  (* The main deserialization function, generated once per type hash.
+
+     We use a combination of RTS stack locals and registers (Wasm globals) for
+     recursive parameter passing to avoid exhausting the Wasm stack, which is instead
+     used solely for return values and (implicit) return addresses.
+
+     Its RTS stack parameters are (c.f. module Stack):
+
+       * idltyp:      The idl type (prim type or table index) to decode now
+       * depth:       Recursion counter; reset when we make progress on the value
+       * can_recover: Whether coercion errors are recoverable, see coercion_failed below
+
+     Its register parameters are (c.f. Registers):
+       * rel_buf_opt: The optional subtype check memoization table
+          (non-null for untrusted Candid but null for trusted de-stablization (see `with_rel_buf_opt`).)
+       * data_buffer: The current position of the input data buffer
+       * ref_buffer:  The current position of the input references buffer
+       * typtbl:      The type table, as returned by parse_idl_header
+       * typtbl_size: The size of the type table, used to limit recursion
+
+     It returns the value of type t (vanilla representation) or coercion_error_value,
+     It advances the data_buffer past the decoded value (even if it returns coercion_error_value!)
+
+  *)
+
+  (* symbolic names for arguments passed on RTS stack *)
+  (* Word offsets within the current RTS stack frame (c.f. module Stack). *)
+  module StackArgs = struct
+    let idltyp = 0L (* idl type (prim type or table index) to decode *)
+    let depth = 1L (* recursion counter *)
+    let can_recover = 2L (* whether coercion errors are recoverable *)
+  end
+
+  let rec deserialize_go env t =
+    let open Type in
+    let t = Type.normalize t in
+    let name = "@deserialize_go<" ^ typ_hash t ^ ">" in
+    Func.share_code0 Func.Always env name
+      [I64Type]
+      (fun env  ->
+      let get_idltyp = Stack.get_local env StackArgs.idltyp in
+      let get_depth = Stack.get_local env StackArgs.depth in
+      let get_can_recover = Stack.get_local env StackArgs.can_recover in
+      let get_rel_buf_opt = Registers.get_rel_buf_opt env in
+      let get_data_buf = Registers.get_data_buf env in
+      let _get_ref_buf = Registers.get_ref_buf env in
+      let get_typtbl = Registers.get_typtbl env in
+      let _get_typtbl_end = Registers.get_typtbl_end env in
+      let get_typtbl_size = Registers.get_typtbl_size env in
+
+      (* Decrement and check idl quota *)
+      Bool.lit_rts_int32 true ^^
+      compile_unboxed_const 1L ^^
+      Registers.idl_limit_check env ^^
+
+      (* Check recursion depth (protects against empty record etc.) *)
+      (* Factor 2 because at each step, the expected type could go through one
+         level of opt that is not present in the value type
+      *)
+      get_depth ^^
+      get_typtbl_size ^^ compile_add_const 1L ^^ compile_mul_const 2L ^^
+      compile_comparison I64Op.LeU ^^
+      E.else_trap_with env ("IDL error: circular record read") ^^
+
+      (* Remember data buffer position, to detect progress *)
+      let (set_old_pos, get_old_pos) = new_local env "old_pos" in
+      ReadBuf.get_ptr get_data_buf ^^ set_old_pos ^^
+
+      let go' can_recover env t =
+        (* assumes idltyp on stack *)
+        Stack.with_frame env "frame_ptr" 3L (fun () ->
+          Stack.set_local env StackArgs.idltyp ^^
+          (* set up frame arguments *)
+          ( (* Reset depth counter if we made progress *)
+            ReadBuf.get_ptr get_data_buf ^^ get_old_pos ^^
+            compile_comparison I64Op.Eq ^^
+            E.if1 I64Type
+              (Stack.get_prev_local env 1L ^^ compile_add_const 1L)
+              (compile_unboxed_const 0L)
+            ) ^^
+          Stack.set_local env StackArgs.depth ^^
+          (if can_recover
+             then compile_unboxed_const 1L
+             else Stack.get_prev_local env 2L) ^^
+          Stack.set_local env StackArgs.can_recover ^^
+          deserialize_go env t)
+      in
+
+      let go = go' false in
+      let go_can_recover = go' true in
+
+      let skip get_typ =
+        get_data_buf ^^ get_typtbl ^^ get_typ ^^  G.i (Convert (Wasm_exts.Values.I32 I32Op.WrapI64)) ^^ compile_const_32 0l ^^
+        E.call_import env "rts" "skip_any"
+      in
+
+      (* This flag is set to return a coercion error at the very end
+         We cannot use (G.i Return) for early exit, or we’d leak stack space,
+         as Stack.with_words is used to allocate scratch space.
+      *)
+      let (set_failed, get_failed) = new_local env "failed" in
+      let set_failure = compile_unboxed_const 1L ^^ set_failed in
+      let when_failed f = get_failed ^^ E.if0 f G.nop in
+
+      (* This looks at a value and if it is coercion_error_value, sets the failure flag.
+         This propagates the error out of arrays, records, etc.
+       *)
+      let remember_failure get_val =
+          get_val ^^ compile_eq_const (coercion_error_value env) ^^
+          E.if0 set_failure G.nop
+      in
+
+      (* This sets the failure flag and puts coercion_error_value on the stack *)
+      let coercion_failed msg =
+        (* If we know that there is no backtracking `opt t` around, then just trap.
+           This gives a better error message
+        *)
+        get_can_recover ^^ E.else_trap_with env msg ^^
+        set_failure ^^ compile_unboxed_const (coercion_error_value env) in
+
+      (* returns true if we are looking at primitive type with this id *)
+      let check_prim_typ t =
+        get_idltyp ^^
+        compile_eq_const (Wasm.I64_convert.extend_i32_s (Int32.neg (Option.get (to_idl_prim Candid t))))
+      in
+
+      let with_prim_typ t f =
+        check_prim_typ t ^^
+        E.if1 I64Type f
+          ( skip get_idltyp ^^
+            coercion_failed ("IDL error: unexpected IDL type when parsing " ^ string_of_typ t)
+          )
+      in
+
+      let read_byte_tagged = function
+        | [code0; code1] ->
+          ReadBuf.read_byte env get_data_buf ^^
+          let (set_b, get_b) = new_local env "b" in
+          set_b ^^
+          get_b ^^
+          compile_eq_const 0L ^^
+          E.if1 I64Type
+          begin code0
+          end begin
+            get_b ^^ compile_eq_const 1L ^^
+            E.else_trap_with env "IDL error: byte tag not 0 or 1" ^^
+            code1
+          end
+        | _ -> assert false; (* can be generalized later as needed *)
+      in
+
+      let read_blob () =
+        let (set_len, get_len) = new_local env "len" in
+        let (set_x, get_x) = new_local env "x" in
+        ReadBuf.read_leb128 env get_data_buf ^^ set_len ^^
+
+        Blob.alloc env Tagged.B get_len ^^ set_x ^^
+        get_x ^^ Blob.payload_ptr_unskewed env ^^
+        ReadBuf.read_blob env get_data_buf get_len ^^
+        get_x
+      in
+
+      let read_principal sort () =
+        let (set_len, get_len) = new_local env "len" in
+        let (set_x, get_x) = new_local env "x" in
+        ReadBuf.read_leb128 env get_data_buf ^^ set_len ^^
+
+        (* at most 29 bytes, according to
+           https://sdk.dfinity.org/docs/interface-spec/index.html#principal
+        *)
+        get_len ^^ compile_unboxed_const 29L ^^ compile_comparison I64Op.LeU ^^
+        E.else_trap_with env "IDL error: principal too long" ^^
+
+        Blob.alloc env sort get_len ^^ set_x ^^
+        get_x ^^ Blob.payload_ptr_unskewed env ^^
+        ReadBuf.read_blob env get_data_buf get_len ^^
+        get_x
+      in
+
+      let read_text () =
+        let (set_len, get_len) = new_local env "len" in
+        ReadBuf.read_leb128 env get_data_buf ^^ set_len ^^
+        let (set_ptr, get_ptr) = new_local env "x" in
+        ReadBuf.get_ptr get_data_buf ^^ set_ptr ^^
+        ReadBuf.advance get_data_buf get_len ^^
+        (* validate *)
+        get_ptr ^^ get_len ^^ E.call_import env "rts" "utf8_validate" ^^
+        (* copy *)
+        get_ptr ^^ get_len ^^ Text.of_ptr_size env
+      in
+
+      let read_actor_data () =
+        read_byte_tagged
+          [ E.trap_with env "IDL error: unexpected actor reference"
+          ; read_principal Tagged.A ()
+          ]
+      in
+
+      (* returns true if get_arg_typ is a composite type of this id *)
+      let check_composite_typ get_arg_typ idl_tycon_id =
+        get_arg_typ ^^
+        compile_unboxed_const 0L ^^ compile_comparison I64Op.GeS ^^
+        E.if1 I64Type
+        begin
+          ReadBuf.alloc env (fun get_typ_buf ->
+            (* Update typ_buf *)
+            ReadBuf.set_ptr get_typ_buf (
+              get_typtbl ^^
+              get_arg_typ ^^ compile_mul_const Heap.word_size ^^
+              G.i (Binary (Wasm_exts.Values.I64 I64Op.Add)) ^^
+              load_unskewed_ptr
+            ) ^^
+            ReadBuf.set_end get_typ_buf (ReadBuf.get_end get_data_buf) ^^
+            (* read sleb128 *)
+            ReadBuf.read_sleb128 env get_typ_buf ^^
+            (* Check it is the expected value *)
+            compile_eq_const (Wasm.I64_convert.extend_i32_s idl_tycon_id)
+          )
+        end
+        (compile_unboxed_const 0L)
+      in
+
+
+      (* checks that arg_typ is positive, looks it up in the table,
+         creates a fresh typ_buf pointing into the type description,
+         reads the type constructor index and traps or fails if it is the wrong one.
+         and passes the typ_buf to a subcomputation to read the type arguments *)
+      let with_composite_arg_typ get_arg_typ idl_tycon_id f =
+        (* make sure index is not negative *)
+        get_arg_typ ^^
+        compile_unboxed_const 0L ^^ compile_comparison I64Op.GeS ^^
+        E.if1 I64Type
+        begin
+          ReadBuf.alloc env (fun get_typ_buf ->
+            (* Update typ_buf *)
+            ReadBuf.set_ptr get_typ_buf (
+              get_typtbl ^^
+              get_arg_typ ^^ compile_mul_const Heap.word_size ^^
+              G.i (Binary (Wasm_exts.Values.I64 I64Op.Add)) ^^
+              load_unskewed_ptr
+            ) ^^
+            ReadBuf.set_end get_typ_buf (ReadBuf.get_end get_data_buf) ^^
+            (* read sleb128 *)
+            ReadBuf.read_sleb128 env get_typ_buf ^^
+            (* Check it is the expected type constructor *)
+            compile_eq_const (Wasm.I64_convert.extend_i32_s idl_tycon_id) ^^
+            E.if1 I64Type
+            begin
+              f get_typ_buf
+            end
+            begin
+              skip get_arg_typ ^^
+              coercion_failed ("IDL error: unexpected IDL type when parsing " ^ string_of_typ t)
+            end
+          )
+        end
+        begin
+          skip get_arg_typ ^^
+          coercion_failed ("IDL error: unexpected IDL type when parsing " ^ string_of_typ t)
+        end
+      in
+
+      let with_alias_typ get_arg_typ =
+        get_arg_typ ^^
+        compile_unboxed_const 0L ^^ compile_comparison I64Op.GeS ^^
+        E.if1 I64Type
+        begin
+            with_composite_arg_typ get_arg_typ idl_alias (ReadBuf.read_sleb128 env)
+        end
+        begin
+          (* sanity check *)
+          get_arg_typ ^^
+          compile_eq_const (Wasm.I64_convert.extend_i32_s (Int32.neg (Option.get (to_idl_prim Candid (Prim Region))))) ^^
+          E.else_trap_with env "IDL error: unexpecting primitive alias type" ^^
+          get_arg_typ
+        end
+      in
+
+      let with_composite_typ idl_tycon_id f =
+        with_composite_arg_typ get_idltyp idl_tycon_id f
+      in
+
+      let with_record_typ f = with_composite_typ idl_record (fun get_typ_buf ->
+        Stack.with_words env "get_n_ptr" 1L (fun get_n_ptr ->
+          get_n_ptr ^^
+          ReadBuf.read_leb128 env get_typ_buf ^^
+          store_unskewed_ptr ^^
+          f get_typ_buf get_n_ptr
+        )
+      ) in
+
+      let with_blob_typ env f =
+        with_composite_typ idl_vec (fun get_typ_buf ->
+          ReadBuf.read_sleb128 env get_typ_buf ^^
+          compile_eq_const (-5L) (* Nat8 *) ^^
+          E.if1 I64Type
+            f
+            begin
+              skip get_idltyp ^^
+              coercion_failed "IDL error: blob not a vector of nat8"
+            end
+        )
+      in
+
+      let store_word32 =
+        G.i (Convert (Wasm_exts.Values.I32 I32Op.WrapI64)) ^^
+        G.i (Store {ty = I32Type; align = 0; offset = 0L; sz = None}) in
+
+      (* See comment on 64-bit destabilization on Note [mutable stable values] *)
+      let pointer_compression_shift = 3L in (* log2(word_size), 3 unused lower bits with 64-bit alignment *)
+      
+      let write_compressed_pointer env =
+        let (set_pointer, get_pointer) = new_local env "pointer" in
+        compile_add_const ptr_unskew ^^
+        compile_shrU_const pointer_compression_shift ^^
+        compile_add_const ptr_skew ^^
+        set_pointer ^^ get_pointer ^^
+        compile_unboxed_const 0xffff_ffffL ^^
+        compile_comparison I64Op.LeU ^^
+        E.else_trap_with env "Pointer cannot be compressed to 32 bit" ^^  
+        get_pointer ^^
+        store_word32
+      in
+
+      let read_compressed_pointer env get_buf =
+        let (set_pointer, get_pointer) = new_local env "pointer" in
+        ReadBuf.read_word32 env get_buf ^^
+        set_pointer ^^ get_pointer ^^
+        compile_eq_const 0L ^^
+        E.if1 I64Type
+          begin
+            get_pointer
+          end
+          begin
+            get_pointer ^^
+            compile_add_const ptr_unskew ^^
+            compile_shl_const pointer_compression_shift ^^
+            compile_add_const ptr_skew
+          end
+      in
+
+      (* Decodes a value of extended-Candid type `alias t`
+         (see Note [mutable stable values]).
+         [read_thing] receives the argument idl type and an [on_alloc]
+         callback; it must invoke [on_alloc] directly after allocating the
+         value — before descending into the content — so that the memo field
+         is already filled in when a cyclic/aliased structure loops back
+         here.  Returns the (possibly memoized) decoded value. *)
+      let read_alias env t read_thing =
+        (* see Note [mutable stable values] *)
+        let (set_is_ref, get_is_ref) = new_local env "is_ref" in
+        let (set_result, get_result) = new_local env "result" in
+        let (set_cur, get_cur) = new_local env "cur" in
+        let (set_memo, get_memo) = new_local env "memo" in
+
+        let (set_arg_typ, get_arg_typ) = new_local env "arg_typ" in
+
+        with_alias_typ get_idltyp ^^ set_arg_typ ^^
+
+        (* Find out if it is a reference or not *)
+        ReadBuf.read_byte env get_data_buf ^^ set_is_ref ^^
+
+        (* If it is a reference, temporarily set the read buffer to that place *)
+        get_is_ref ^^
+        E.if0 begin
+          let (set_offset, get_offset) = new_local env "offset" in
+          ReadBuf.read_signed_word32 env get_data_buf ^^ 
+          set_offset ^^
+          (* A sanity check: back-references must have a negative offset *)
+          get_offset ^^ compile_unboxed_const 0L ^^
+          compile_comparison I64Op.LtS ^^
+          E.else_trap_with env "Odd offset" ^^
+
+          (* remember current position so we can return here afterwards *)
+          ReadBuf.get_ptr get_data_buf ^^ set_cur ^^
+          (* -4: the four offset bytes were already consumed above *)
+          ReadBuf.advance get_data_buf (get_offset ^^ compile_add_const (-4L))
+        end G.nop ^^
+
+        (* Remember location of ptr *)
+        ReadBuf.get_ptr get_data_buf ^^ set_memo ^^
+        (* Did we decode this already? *)
+        read_compressed_pointer env get_data_buf ^^ 
+        set_result ^^
+        get_result ^^ compile_eq_const 0L ^^
+        E.if0 begin
+          (* No, not yet decoded *)
+          (* Skip over type hash field *)
+          ReadBuf.read_word32 env get_data_buf ^^ compile_eq_const 0L ^^
+          E.else_trap_with env "Odd: Type hash scratch space not empty" ^^
+
+          (* Read the content *)
+          read_thing get_arg_typ (fun get_thing ->
+            (* This is called after allocation, but before descending
+               We update the memo location here so that loops work
+            *)
+            get_thing ^^ set_result ^^
+            get_memo ^^ get_result ^^ write_compressed_pointer env ^^
+            get_memo ^^ compile_add_const 4L ^^ Blob.lit env Tagged.B (typ_hash t) ^^ write_compressed_pointer env
+          )
+          end begin
+          (* Decoded before. Check type hash *)
+          read_compressed_pointer env get_data_buf ^^ Blob.lit env Tagged.B (typ_hash t) ^^
+          Blob.compare env (Some Operator.EqOp) ^^
+          E.else_trap_with env ("Stable memory error: Aliased at wrong type, expected: " ^ typ_hash t)
+        end ^^
+
+        (* If this was a reference, reset read buffer *)
+        get_is_ref ^^
+        E.if0 (ReadBuf.set_ptr get_data_buf get_cur) G.nop ^^
+
+        get_result
+      in
+
+
+      (* Now the actual deserialization *)
+      begin match t with
+      (* Primitive types *)
+      | Prim Nat ->
+        with_prim_typ t
+        begin
+          BigNum.compile_load_from_data_buf env get_data_buf false
+        end
+      | Prim Int ->
+        (* Subtyping with nat *)
+        check_prim_typ (Prim Nat) ^^
+        E.if1 I64Type
+          begin
+            BigNum.compile_load_from_data_buf env get_data_buf false
+          end
+          begin
+            with_prim_typ t
+            begin
+              BigNum.compile_load_from_data_buf env get_data_buf true
+            end
+          end
+      | Prim Float ->
+        with_prim_typ t
+        begin
+          ReadBuf.read_float64 env get_data_buf ^^
+          Float.box env
+        end
+      | Prim ((Int64|Nat64) as pty) ->
+        with_prim_typ t
+        begin
+          ReadBuf.read_word64 env get_data_buf ^^
+          BoxedWord64.box env pty
+        end
+      | Prim ((Int32|Nat32) as ty) ->
+        with_prim_typ t
+        begin
+          ReadBuf.read_word32 env get_data_buf ^^
+          TaggedSmallWord.msb_adjust ty ^^
+          TaggedSmallWord.tag env ty
+        end
+      | Prim Char ->
+        with_prim_typ t
+        begin
+          ReadBuf.read_word32 env get_data_buf ^^
+          TaggedSmallWord.check_and_msb_adjust_codepoint env ^^
+          TaggedSmallWord.tag env Char
+        end
+      | Prim ((Int16|Nat16) as ty) ->
+        with_prim_typ t
+        begin
+          ReadBuf.read_word16 env get_data_buf ^^
+          TaggedSmallWord.msb_adjust ty ^^
+          TaggedSmallWord.tag env ty
+        end
+      | Prim ((Int8|Nat8) as ty) ->
+        with_prim_typ t
+        begin
+          ReadBuf.read_byte env get_data_buf ^^
+          TaggedSmallWord.msb_adjust ty ^^
+          TaggedSmallWord.tag env ty
+        end
+      | Prim Bool ->
+        with_prim_typ t
+        begin
+          read_byte_tagged
+            [ Bool.lit false
+            ; Bool.lit true
+            ]
+        end
+      | Prim Null ->
+        with_prim_typ t (Opt.null_lit env)
+      | Any ->
+        skip get_idltyp ^^
+        (* Any vanilla value works here *)
+        Opt.null_lit env
+      | Prim Blob ->
+        with_blob_typ env (read_blob ())
+      | Prim Principal ->
+        with_prim_typ t
+        begin
+          read_byte_tagged
+            [ E.trap_with env "IDL error: unexpected principal reference"
+            ; read_principal Tagged.P ()
+            ]
+        end
+      | Prim Text ->
+        with_prim_typ t (read_text ())
+      | Tup [] -> (* e(()) = null *)
+        with_prim_typ t (Tuple.from_stack env 0)
+      (* Composite types *)
+      | Tup ts ->
+        with_record_typ (fun get_typ_buf get_n_ptr ->
+          let (set_val, get_val) = new_local env "val" in
+
+          G.concat_mapi (fun i t ->
+            (* skip all possible intermediate extra fields *)
+            get_typ_buf ^^ get_data_buf ^^ get_typtbl ^^ compile_const_32 (Int32.of_int i) ^^ get_n_ptr ^^
+            E.call_import env "rts" "find_field" ^^
+            Bool.from_rts_int32 ^^
+            E.if1 I64Type
+              begin
+                ReadBuf.read_sleb128 env get_typ_buf ^^
+                go env t ^^ set_val ^^
+                remember_failure get_val ^^
+                get_val
+              end
+              begin
+                match normalize t with
+                | Prim Null | Opt _ | Any -> Opt.null_lit env
+                | _ -> coercion_failed "IDL error: did not find tuple field in record"
+              end
+          ) ts ^^
+
+          (* skip all possible trailing extra fields *)
+          get_typ_buf ^^ get_data_buf ^^ get_typtbl ^^ get_n_ptr ^^
+          E.call_import env "rts" "skip_fields" ^^
+
+          Tuple.from_stack env (List.length ts)
+        )
+      | Obj ((Object | Memory), fs) ->
+        with_record_typ (fun get_typ_buf get_n_ptr ->
+          let (set_val, get_val) = new_local env "val" in
+
+          Object.lit_raw env (List.map (fun (h,f) ->
+            f.Type.lab, fun () ->
+              (* skip all possible intermediate extra fields *)
+              get_typ_buf ^^ get_data_buf ^^ get_typtbl ^^ compile_const_32 (Lib.Uint32.to_int32 h) ^^ get_n_ptr ^^
+              E.call_import env "rts" "find_field" ^^
+              Bool.from_rts_int32 ^^
+              E.if1 I64Type
+                begin
+                  ReadBuf.read_sleb128 env get_typ_buf ^^
+                  go env f.typ ^^ set_val ^^
+                  remember_failure get_val ^^
+                  get_val
+                  end
+                begin
+                  match normalize f.typ with
+                  | Prim Null | Opt _ | Any -> Opt.null_lit env
+                  | _ -> coercion_failed (Printf.sprintf "IDL error: did not find field %s in record" f.lab)
+                end
+          ) (sort_by_hash fs)) ^^
+
+          (* skip all possible trailing extra fields *)
+          get_typ_buf ^^ get_data_buf ^^ get_typtbl ^^ get_n_ptr ^^
+          E.call_import env "rts" "skip_fields"
+          )
+      | Array (Mut t) ->
+        read_alias env (Array (Mut t)) (fun get_array_typ on_alloc ->
+          let (set_len, get_len) = new_local env "len" in
+          let (set_x, get_x) = new_local env "x" in
+          let (set_val, get_val) = new_local env "val" in
+          let (set_arg_typ, get_arg_typ) = new_local env "arg_typ" in
+          (* TODO: if possible refactor to match new Array t code,
+             (perhaps too risky and unnecessary for extended candid due to lack of fancy opt subtyping, see #4243)
+          *)
+          with_composite_arg_typ get_array_typ idl_vec (ReadBuf.read_sleb128 env) ^^ set_arg_typ ^^
+          ReadBuf.read_leb128 env get_data_buf ^^ set_len ^^
+          Arr.alloc env Tagged.M get_len ^^ set_x ^^
+          on_alloc get_x ^^
+          get_len ^^ from_0_to_n env (fun get_i ->
+            get_x ^^ get_i ^^ Arr.unsafe_idx env ^^
+            get_arg_typ ^^ go env t ^^ set_val ^^
+            remember_failure get_val ^^
+            get_val ^^ store_ptr
+          ) ^^
+          get_x ^^
+          Tagged.allocation_barrier env ^^
+          G.i Drop
+        )
+      | Prim Region ->
+         read_alias env (Prim Region) (fun get_region_typ on_alloc ->
+          let (set_region, get_region) = new_local env "region" in
+          (* sanity check *)
+          get_region_typ ^^
+          compile_eq_const (Wasm.I64_convert.extend_i32_s (Int32.neg (Option.get (to_idl_prim Candid (Prim Region))))) ^^
+          E.else_trap_with env "deserialize_go (Region): unexpected idl_typ" ^^
+          (* pre-allocate a region object, with dummy fields *)
+          compile_unboxed_const 0L ^^ (* id *)
+          compile_unboxed_const 0L ^^ (* pagecount *)
+          Blob.lit env Tagged.B "" ^^ (* vec_pages *)
+          Region.alloc_region env ^^
+          set_region ^^
+          on_alloc get_region ^^
+          (* read and initialize the region's fields *)
+          get_region ^^
+          ReadBuf.read_word64 env get_data_buf ^^ (* id *)
+          ReadBuf.read_word32 env get_data_buf ^^ (* pagecount *)
+          read_blob () ^^ (* vec_pages *)
+          Region.init_region env
+        )
+      | Array t ->
+        let (set_len, get_len) = new_local env "len" in
+        let (set_x, get_x) = new_local env "x" in
+        let (set_val, get_val) = new_local env "val" in
+        let (set_arg_typ, get_arg_typ) = new_local env "arg_typ" in
+        with_composite_typ idl_vec (fun get_typ_buf ->
+          ReadBuf.read_sleb128 env get_typ_buf ^^
+          set_arg_typ ^^
+          ReadBuf.read_leb128 env get_data_buf ^^ set_len ^^
+          (* Don't decrement just check quota *)
+          Bool.lit_rts_int32 false ^^
+          get_len ^^
+          Registers.idl_limit_check env ^^
+          Arr.alloc env Tagged.I get_len ^^ set_x ^^
+          get_len ^^ from_0_to_n env (fun get_i ->
+          get_x ^^ get_i ^^ Arr.unsafe_idx env ^^
+          get_arg_typ ^^ go env t ^^ set_val ^^
+          remember_failure get_val ^^
+          get_val ^^ store_ptr
+        ) ^^
+        get_x ^^
+        Tagged.allocation_barrier env)
+      | Opt t ->
+        check_prim_typ (Prim Null) ^^
+        E.if1 I64Type (Opt.null_lit env)
+        begin
+          check_prim_typ Any ^^ (* reserved *)
+          E.if1 I64Type (Opt.null_lit env)
+          begin
+            check_composite_typ get_idltyp idl_opt ^^
+            E.if1 I64Type
+            begin
+              let (set_arg_typ, get_arg_typ) = new_local env "arg_typ" in
+              with_composite_typ idl_opt (ReadBuf.read_sleb128 env) ^^ set_arg_typ ^^
+              read_byte_tagged
+                [ Opt.null_lit env
+                ; let (set_val, get_val) = new_local env "val" in
+                  get_arg_typ ^^ go_can_recover env t ^^ set_val ^^
+                  get_val ^^ compile_eq_const (coercion_error_value env) ^^
+                  E.if1 I64Type
+                    (* decoding failed, but this is opt, so: return null *)
+                    (Opt.null_lit env)
+                    (* decoding succeeded, return opt value *)
+                    (Opt.inject env get_val)
+                ]
+            end
+            begin
+              (* this check corresponds to `not (null <: )` in the spec *)
+              match normalize t with
+              | Prim Null | Opt _ | Any ->
+                (* Ignore and return null *)
+                skip get_idltyp ^^
+                Opt.null_lit env
+              | _ ->
+                (* Try constituent type *)
+                let (set_val, get_val) = new_local env "val" in
+                get_idltyp ^^ go_can_recover env t ^^ set_val ^^
+                get_val ^^ compile_eq_const (coercion_error_value env) ^^
+                E.if1 I64Type
+                  (* decoding failed, but this is opt, so: return null *)
+                  (Opt.null_lit env)
+                  (* decoding succeeded, return opt value *)
+                  (Opt.inject env get_val)
+            end
+          end
+        end
+      | Variant vs ->
+        let (set_val, get_val) = new_local env "val" in
+        with_composite_typ idl_variant (fun get_typ_buf ->
+          (* Find the tag *)
+          let (set_n, get_n) = new_local env "len" in
+          ReadBuf.read_leb128 env get_typ_buf ^^ set_n ^^
+
+          let (set_tagidx, get_tagidx) = new_local env "tagidx" in
+          ReadBuf.read_leb128 env get_data_buf ^^ set_tagidx ^^
+
+          get_tagidx ^^ get_n ^^
+          compile_comparison I64Op.LtU ^^
+          E.else_trap_with env "IDL error: variant index out of bounds" ^^
+
+          (* Zoom past the previous entries *)
+          get_tagidx ^^ from_0_to_n env (fun _ ->
+            get_typ_buf ^^ E.call_import env "rts" "skip_leb128" ^^
+            get_typ_buf ^^ E.call_import env "rts" "skip_leb128"
+          ) ^^
+
+          (* Now read the tag *)
+          let (set_tag, get_tag) = new_local env "tag" in
+          ReadBuf.read_leb128 env get_typ_buf ^^ set_tag ^^
+          let (set_arg_typ, get_arg_typ) = new_local env "arg_typ" in
+          ReadBuf.read_sleb128 env get_typ_buf ^^ set_arg_typ ^^
+
+          List.fold_right (fun (h, {lab = l; typ = t; _}) continue ->
+              get_tag ^^ compile_eq_const (Wasm.I64_convert.extend_i32_u (Lib.Uint32.to_int32 h)) ^^
+              E.if1 I64Type
+                ( Variant.inject env l (
+                  get_arg_typ ^^ go env t ^^ set_val ^^
+                  remember_failure get_val ^^
+                  get_val
+                ))
+                continue
+            )
+            ( sort_by_hash vs )
+            ( skip get_arg_typ ^^
+              coercion_failed "IDL error: unexpected variant tag" )
+        )
+      | Func _ ->
+        (* See Note [Candid subtype checks] *)
+        get_rel_buf_opt ^^
+        E.if1 I64Type
+          begin
+            get_idltyp ^^
+            idl_sub env t
+          end
+          (Bool.lit true) ^^ (* if we don't have a subtype memo table, assume the types are ok *)
+        E.if1 I64Type
+          (with_composite_typ idl_func (fun _get_typ_buf ->
+            read_byte_tagged
+              [ E.trap_with env "IDL error: unexpected function reference"
+              ; let (set_actor, get_actor) = new_local env "actor" in
+                let (set_func, get_func) = new_local env "func" in
+                read_actor_data () ^^ set_actor ^^
+                read_text () ^^ set_func ^^
+                Arr.lit env Tagged.S [get_actor; get_func]
+              ]))
+          (skip get_idltyp ^^
+           coercion_failed "IDL error: incompatible function type")
+      | Obj (Actor, _) ->
+        (* See Note [Candid subtype checks] *)
+        get_rel_buf_opt ^^
+        E.if1 I64Type
+          begin
+            get_idltyp ^^
+            idl_sub env t
+          end
+          (Bool.lit true) ^^
+        E.if1 I64Type
+          (with_composite_typ idl_service
+             (fun _get_typ_buf -> read_actor_data ()))
+          (skip get_idltyp ^^
+           coercion_failed "IDL error: incompatible actor type")
+      | Mut t ->
+        read_alias env (Mut t) (fun get_arg_typ on_alloc ->
+          let (set_result, get_result) = new_local env "result" in
+          MutBox.alloc env ^^ set_result ^^
+          on_alloc get_result ^^
+          get_result ^^
+          get_arg_typ ^^ go env t ^^
+          MutBox.store_field env
+        )
+      | Non ->
+        skip get_idltyp ^^
+        coercion_failed "IDL error: deserializing value of type None"
+      | _ -> todo_trap env "deserialize" (Arrange_ir.typ t)
+      end ^^
+      (* Parsed value on the stack, return that, unless the failure flag is set *)
+      when_failed (compile_unboxed_const (coercion_error_value env) ^^ G.i Return)
+    )
+
+  (* Emits (and shares, keyed on the type sequence [ts]) a serializer
+     function: it takes the Motoko value on the stack and serializes it to
+     Candid, yielding a data pointer and a length. *)
+  let serialize env ts : G.t =
+    let name = Strm.name_for "serialize" ts in
+    (* returns data/length pointers (will be GC’ed next time!) *)
+    Func.share_code1 Func.Always env name ("x", I64Type) [I64Type; I64Type] (fun env get_x ->
+      let (set_data_size, get_data_size) = new_local env "data_size" in
+      let (set_refs_size, get_refs_size) = new_local env "refs_size" in
+
+      (* statically computed type-description header for [ts] *)
+      let (tydesc, _offsets, _idltyps) = type_desc env Candid ts in
+      let tydesc_len = Int64.of_int (String.length tydesc) in
+
+      (* Get object sizes *)
+      (* buffer_size leaves data size below refs size; pop in reverse order *)
+      get_x ^^
+      buffer_size env (Type.seq ts) ^^
+      set_refs_size ^^
+      set_data_size ^^
+      (* check for overflow: trap if data_size + tydesc_len wrapped (unsigned) *)
+      get_data_size ^^
+      compile_add_const tydesc_len ^^
+      compile_unboxed_const tydesc_len ^^
+      compile_comparison I64Op.LtU ^^
+      E.then_trap_with env "serialization overflow" ^^
+
+      let (set_data_start, get_data_start) = new_local env "data_start" in
+      let (set_refs_start, get_refs_start) = new_local env "refs_start" in
+
+      (* Create a stream with suitable capacity and given header *)
+      Strm.create env get_data_size set_data_start get_data_start tydesc ^^
+      get_refs_size ^^ compile_mul_const Heap.word_size ^^ Blob.dyn_alloc_scratch env ^^ set_refs_start ^^
+
+      (* Serialize x into the buffer *)
+      get_x ^^
+      get_data_start ^^
+      get_refs_start ^^
+      serialize_go env (Type.seq ts) ^^
+
+      (* Sanity check: Did we fill exactly the buffer *)
+      get_refs_start ^^ get_refs_size ^^ compile_mul_const Heap.word_size ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.Add)) ^^
+      compile_comparison I64Op.Eq ^^
+      E.else_trap_with env "reference buffer not filled" ^^
+
+      (* Verify that the stream is correctly filled *)
+      Strm.check_filled env get_data_start get_data_size ^^
+      (* references are unsupported on this path; require refs_size = 0 *)
+      get_refs_size ^^
+      compile_eq_const 0L ^^
+      E.else_trap_with env "cannot send references on IC System API" ^^
+
+      (* Extract the payload if possible *)
+      Strm.terminate env get_data_start get_data_size tydesc_len
+    )
+
+
+  (* Emits (and shares, keyed on [ts] and [extended]) a deserializer that
+     decodes a Candid blob at the Motoko types [ts], leaving one decoded
+     value per type on the stack.  [extended] selects the stable-variable
+     ("extended" Candid) format; the "can_recover" argument controls whether
+     coercion failures yield the coercion_error_value sentinel instead of
+     trapping.  See Note [Candid subtype checks]. *)
+  let deserialize_from_blob extended env ts =
+    let ts_name = typ_seq_hash ts in
+    let name =
+      (* TODO(#3185): this specialization on `extended` seems redundant,
+         removing it might simplify things *and* share more code in binaries.
+         The only tricky bit might be the conditional Stack.dynamic_with_words bit... *)
+      if extended
+      then "@deserialize_extended<" ^ ts_name ^ ">"
+      else "@deserialize<" ^ ts_name ^ ">" in
+    Func.share_code2 Func.Always env name (("blob", I64Type), ("can_recover", I64Type)) (List.map (fun _ -> I64Type) ts) (fun env get_blob get_can_recover ->
+      let (set_data_size, get_data_size) = new_local env "data_size" in
+      let (set_refs_size, get_refs_size) = new_local env "refs_size" in
+      let (set_data_start, get_data_start) = new_local env "data_start" in
+      let (set_refs_start, get_refs_start) = new_local env "refs_start" in
+      let (set_arg_count, get_arg_count) = new_local env "arg_count" in
+      let (set_val, get_val) = new_local env "val" in
+
+      get_blob ^^ Blob.len env ^^ set_data_size ^^
+      get_blob ^^ Blob.payload_ptr_unskewed env ^^ set_data_start ^^
+
+      (* Allocate space for the reference buffer and copy it *)
+      compile_unboxed_const 0L ^^ set_refs_size (* none yet *) ^^
+
+      (* Allocate space for out parameters of parse_idl_header *)
+      Stack.with_words env "get_typtbl_size_ptr" 1L (fun get_typtbl_size_ptr ->
+      Stack.with_words env "get_typtbl_ptr" 1L (fun get_typtbl_ptr ->
+      Stack.with_words env "get_maintyps_ptr" 1L (fun get_maintyps_ptr ->
+
+      (* Allocate space for out parameters of idl_alloc_typtbl *)
+      Stack.with_words env "get_global_typtbl_ptr" 1L (fun get_global_typtbl_ptr ->
+      Stack.with_words env "get_global_typtbl_end_ptr" 1L (fun get_global_typtbl_end_ptr ->
+      Stack.with_words env "get_global_typtbl_size_ptr" 1L (fun get_global_typtbl_size_ptr ->
+
+      (* Set up read buffers *)
+      ReadBuf.alloc env (fun get_data_buf -> ReadBuf.alloc env (fun get_ref_buf ->
+
+      ReadBuf.set_ptr get_data_buf get_data_start ^^
+      ReadBuf.set_size get_data_buf get_data_size ^^
+      ReadBuf.set_ptr get_ref_buf get_refs_start ^^
+      ReadBuf.set_size get_ref_buf (get_refs_size ^^ compile_mul_const Heap.word_size) ^^
+
+      (* Go! *)
+      Bool.lit extended ^^ Bool.to_rts_int32 ^^ get_data_buf ^^ get_typtbl_ptr ^^ get_typtbl_size_ptr ^^ get_maintyps_ptr ^^
+      E.call_import env "rts" "parse_idl_header" ^^
+
+      (* Allocate the global type table, if necessary for subtype checks
+         (only proper Candid decoding performs them; extended mode skips) *)
+      (if extended then
+         G.nop
+       else begin
+         get_global_candid_data env ^^
+         get_global_type_offsets env ^^
+         get_global_typtbl_ptr ^^ get_global_typtbl_end_ptr ^^ get_global_typtbl_size_ptr ^^
+         E.call_import env "rts" "idl_alloc_typtbl"
+      end) ^^
+
+      (* Allocate memo table, if necessary *)
+      with_rel_buf_opt env extended
+        (get_typtbl_size_ptr ^^ load_unskewed_ptr)
+        (get_global_typtbl_size_ptr ^^ load_unskewed_ptr)
+        (fun get_rel_buf_opt ->
+      begin
+        (* set up invariant register arguments *)
+        get_rel_buf_opt ^^ Registers.set_rel_buf_opt env ^^
+        get_data_buf ^^ Registers.set_data_buf env ^^
+        get_ref_buf ^^ Registers.set_ref_buf env ^^
+        get_typtbl_ptr ^^ load_unskewed_ptr ^^ Registers.set_typtbl env ^^
+        get_maintyps_ptr ^^ load_unskewed_ptr ^^ Registers.set_typtbl_end env ^^
+        get_typtbl_size_ptr ^^ load_unskewed_ptr ^^ Registers.set_typtbl_size env ^^
+        get_global_typtbl_ptr ^^ load_unskewed_ptr ^^ Registers.set_global_typtbl env ^^
+        get_global_typtbl_end_ptr ^^ load_unskewed_ptr ^^ Registers.set_global_typtbl_end env ^^
+        get_global_typtbl_size_ptr ^^ load_unskewed_ptr ^^ Registers.set_global_typtbl_size env ^^
+        Registers.reset_value_limit env get_blob get_rel_buf_opt
+      end ^^
+
+      (* set up a dedicated read buffer for the list of main types *)
+      ReadBuf.alloc env (fun get_main_typs_buf ->
+        ReadBuf.set_ptr get_main_typs_buf (get_maintyps_ptr ^^ load_unskewed_ptr) ^^
+        ReadBuf.set_end get_main_typs_buf (ReadBuf.get_end get_data_buf) ^^
+        ReadBuf.read_leb128 env get_main_typs_buf ^^ set_arg_count ^^
+
+        (* decode one value per expected type, in sequence *)
+        G.concat_map (fun t ->
+          (* opt-like types always recover to null; others obey can_recover *)
+          let can_recover, default_or_trap = Type.(
+            match normalize t with
+            | Prim Null | Opt _ | Any ->
+              (Bool.lit true, fun msg -> Opt.null_lit env)
+            | _ ->
+              (get_can_recover, fun msg ->
+                get_can_recover ^^
+                E.if1 I64Type
+                   (compile_unboxed_const (coercion_error_value env))
+                   (E.trap_with env msg)))
+          in
+          get_arg_count ^^
+          compile_eq_const 0L ^^
+          E.if1 I64Type
+           (default_or_trap ("IDL error: too few arguments " ^ ts_name))
+           (begin
+              (* set up variable frame arguments *)
+              Stack.with_frame env "frame_ptr" 3L (fun () ->
+                (* idltyp *)
+                ReadBuf.read_sleb128 env get_main_typs_buf ^^
+                Stack.set_local env StackArgs.idltyp ^^
+                (* depth *)
+                compile_unboxed_const 0L ^^
+                Stack.set_local env StackArgs.depth ^^
+                (* recovery mode *)
+                can_recover ^^
+                Stack.set_local env StackArgs.can_recover ^^
+                deserialize_go env t
+             )
+             ^^ set_val ^^
+             get_arg_count ^^ compile_sub_const 1L ^^ set_arg_count ^^
+             get_val ^^ compile_eq_const (coercion_error_value env) ^^
+             (E.if1 I64Type
+               (default_or_trap "IDL error: coercion failure encountered")
+               get_val)
+            end)
+        ) ts ^^
+
+        (* Skip any extra arguments *)
+        compile_while env
+         (get_arg_count ^^ compile_rel_const I64Op.GtU 0L)
+         begin
+           get_data_buf ^^
+           get_typtbl_ptr ^^ load_unskewed_ptr ^^
+           ReadBuf.read_sleb128 env get_main_typs_buf ^^
+           G.i (Convert (Wasm_exts.Values.I32 I32Op.WrapI64)) ^^
+           compile_const_32 0l ^^
+           E.call_import env "rts" "skip_any" ^^
+           get_arg_count ^^ compile_sub_const 1L ^^ set_arg_count
+         end ^^
+
+        (* both buffers must be fully consumed, else the input was malformed *)
+        ReadBuf.is_empty env get_data_buf ^^
+        E.else_trap_with env ("IDL error: left-over bytes " ^ ts_name) ^^
+        ReadBuf.is_empty env get_ref_buf ^^
+        E.else_trap_with env ("IDL error: left-over references " ^ ts_name) ^^
+
+        (* Safety guard: The temporary pointers in the registers must no longer be used when a GC increment runs. *)
+        Registers.clear_registers env
+      )))))))))
+
+    ))
+
+  (* Decodes the current IC message arguments at the Motoko types [ts]:
+     fetches the raw argument blob and deserializes it as proper
+     (non-extended) Candid; coercion failures trap (can_recover = false). *)
+  let deserialize env ts =
+    IC.arg_data env ^^
+    Bool.lit false ^^ (* can't recover *)
+    deserialize_from_blob false env ts
+
+(*
+Note [speculating for short (S)LEB encoded bignums]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+#3098 highlighted that a lot of heap garbage can be generated while reading in
+(S)LEB-encoded bignums. To make heap consumption optimal for every compactly
+representable `Int`, we resort to speculatively reading a 64-byte chunk from
+the `ReadBuf`. We call it speculative, because it may read past the end of the
+buffer (and thus end up containing junk bytes) or even fail because reading
+across Wasm page boundaries could cause trapping. (Consider the buffer ending
+3 bytes before the last-memory-page boundary and issuing a speculative 64-bit read for the
+address 2 bytes less than buffer end.) In case of failure to read data, `-1`
+(a sentinel) is returned. (The sentinel could be use-case specific when later
+the need arises.)
+
+In most cases the speculative read will come back with valid bytes. How many
+of those are relevant, can be judged by consulting the buffer-end pointer or
+analysing the 64-bit word directly. In the case of (S)LEB, the continuation and
+termination bits can be filtered and thus the encoding's last byte detected when
+present in the 64-bit word.
+
+If such a LEB boundary is detected, avenues open up for a much faster (than
+bytewise-sequential) parsing.
+
+After the data is interpreted, it's the client's responsibility to adjust the
+current buffer position.
+
+ *)
+
+(*
+Note [mutable stable values]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+We currently use a Candid derivative to serialize stable values. In addition to
+storing sharable data, we can also store mutable data (records with mutable
+fields and mutable arrays), and we need to preserve aliasing.
+
+To that end we extend Candid with a type constructor `alias t`.
+
+In the type table, alias t is represented by type code 1. All Candid type constructors
+are represented by negative numbers, so this cannot clash with anything and,
+conveniently, makes such values illegal Candid.
+
+The values of `alias t` are either
+
+ * i8(0) 0x00000000 0x00000000 M(v)
+   for one (typically the first) occurrence of v
+   The first 0x00000000 is the “memo field”, the second is the “type hash field”.
+   Both are scratch spaces for the benefit of the decoder.
+   We use **pointer compression** to store 64-bit pointers that are potentially larger
+   than 4GB but small enough to fit into 32-bit with the compressed representation.
+   Pointers are expected to refer to at most 8GB as the memory representation may grow
+   up to two times by switching from 32-bit to 64-bit.
+
+or
+
+ * i8(1) i32(offset) M(v)
+   for all other occurrences of v, where offset is the relative position of the
+   above occurrence from this reference.
+
+We map Motoko types to this as follows:
+
+  e([var t]) = alias e([t]) = alias vec e(t)
+  e({var field : t}) = record { field : alias e(t) }
+
+Why different? Because we need to alias arrays as a whole (we can’t even alias
+their fields, as they are manifestly part of the array heap structure), but
+aliasing records does not work, as aliased record values may appear at
+different types (due to subtyping), and Candid serialization is type-driven.
+Luckily records put all mutable fields behind an indirection (MutBox), so this
+works.
+
+The type-driven code in this module treats `Type.Mut` to always refer to an
+`MutBox`; for arrays the mutable case is handled directly.
+
+To detect and preserve aliasing, these steps are taken:
+
+ * In `buffer_size`, when we see a mutable thing (`Array` or `MutBox`), the
+   first time, we mark it by setting the heap tag to `StableSeen`.
+   This way, when we see it a second time, we can skip the value in the size
+   calculation.
+ * In `serialize`, when we see it a first time (tag still `StableSeen`),
+   we serialize it (first form above), and remember the absolute position
+   in the output buffer, abusing the heap tag here.
+   (Invariant: This absolute position is never `StableSeen`)
+   Upon a second visit (tag not `StableSeen`), we can thus fetch that absolute
+   position and calculate the offset.
+ * In `deserialize`, when we come across a `alias t`, we follow the offset (if
+   needed) to find the content.
+
+   If the memo field is still `0x00000000`, this is the first time we read
+   this, so we deserialize to the Motoko heap, and remember the **compressed**
+   64-bit vanilla pointer by overwriting the memo field.
+   We also store the **compressed** pointer to a blob with the type hash of 
+   the type we are serializing at in the type hash field.
+
+   NOTE for 64-bit destabilization: The Candid destabilization format historically 
+   only reserves 32-bit space for remembering addresses of aliases. However, when 
+   upgrading from old Candid destabilization to new enhanced orthogonal persistence, 
+   the deserialized objects may occupy larger object space (worst case the double space), 
+   such that pointers may be larger than 4GB. Therefore, we use pointer compression to 
+   safely narrow 64-bit addresses into 32-bit Candid(ish) memo space. The compression 
+   relies on the property that the 3 lower bits of the unskewed pointer are zero due 
+   to the 8-byte (64-bit) object alignment.
+
+   If it is not `0x00000000` then we can simply read the **compressed** pointer 
+   from there, after checking the type hash field to make sure we are aliasing at 
+   the same type.
+
+ *)
+
+(*
+Note [Candid subtype checks]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Deserializing Candid values requires a Candid subtype check when
+deserializing values of reference types (actors and functions).
+
+The subtype test is performed directly on the expected and actual
+candid type tables using RTS functions `idl_sub_buf_words`,
+`idl_sub_buf_init` and `idl_sub`. One type table and vector of types
+is generated statically from the list of statically known types
+encountered during code generation, the other is determined
+dynamically by, e.g. message payload. The latter will vary with
+each payload to decode.
+
+The static type table and a type descriptor are stored in passive 
+data segments. Instead of absolute memory addresses, the static type 
+table in the data segment only contains relative offsets into the type 
+descriptor. When loaded, these offsets are patched by static addresses 
+that point into the type descriptor.
+
+The known Motoko types are accumulated in a global list as required
+and then, in a final compilation step, encoded to global type table
+and the type descriptor (sequence of type indices). The encoding is 
+stored in passive data segments referenced (by way of segment indices) 
+from dedicated wasm globals so that we can generate code that 
+references the globals before their final definitions are known.
+
+Deserializing a proper (not extended) Candid value stack allocates a
+mutable word buffer, of size determined by `idl_sub_buf_words`.
+The word buffer is used to initialize and provide storage for a
+Rust memo table (see bitrel.rs) memoizing the result of sub and
+super type tests performed during deserialization of a given Candid
+value sequence. The memo table is initialized once, using `idl_sub_buf_init`,
+then shared between recursive calls to deserialize, by threading the (possibly
+null) wasm address of the word buffer as an optional argument. The
+word buffer is stack allocated in generated code, not Rust, because
+it's size is dynamic and Rust doesn't seem to support dynamically-sized
+stack allocation.
+
+Currently, we only perform Candid subtype checks when decoding proper
+(not extended) Candid values. Extended values are required for
+stable variables only: we can omit the check, because compatibility
+should already be enforced by the static signature compatibility
+check. We use the `null`-ness of the word buffer pointer to
+dynamically determine whether to omit or perform Candid subtype checks.
+
+NB: Extending `idl_sub` to support extended, "stable" types (with mutable,
+invariant type constructors) would require extending the polarity argument
+from a Boolean to a three-valued argument to efficiently check equality for
+invariant type constructors in a single pass.
+*)
+
+end (* Serialization *)
+
+(* OldStabilization as migration code: 
+  Deserializing a last time from Candid-serialized stable objects into the stable heap:
+   * stable variables; and
+   * virtual stable memory.
+   c.f.
+   * ../../design/Stable.md
+   * ../../design/StableMemory.md
+*)
+module OldStabilization = struct
+  (* 32-bit load/store snippets: the legacy Candid layout stores 32-bit words. *)
+  let load_word32 = G.i (Load {ty = I32Type; align = 0; offset = 0L; sz = None})
+  let store_word32 = G.i (Store {ty = I32Type; align = 0; offset = 0L; sz = None})
+  let write_word32 env = StableMem.write env false "word32" I32Type 4L store_word32
+
+  (* read and clear word32 from stable mem offset on stack *)
+  (* The word is returned zero-extended to 64 bit; its stable-memory slot is
+     overwritten with 0, erasing the legacy data as it is consumed. *)
+  let read_and_clear_word32 env =
+    match E.mode env with
+    | Flags.ICMode | Flags.RefMode ->
+      Func.share_code1 Func.Always env "__stablemem_read_and_clear_word32"
+        ("offset", I64Type) [I64Type]
+        (fun env get_offset ->
+          Stack.with_words env "temp_ptr" 1L (fun get_temp_ptr ->
+            let (set_word, get_word) = new_local32 env "word" in
+            (* read word *)
+            get_temp_ptr ^^
+            get_offset ^^
+            compile_unboxed_const 4L ^^
+            StableMem.stable64_read env ^^
+            get_temp_ptr ^^ load_word32 ^^
+            set_word ^^
+            (* write 0 *)
+            get_temp_ptr ^^ compile_const_32 0l ^^ store_word32 ^^
+            get_offset ^^
+            get_temp_ptr ^^
+            compile_unboxed_const 4L ^^
+            StableMem.stable64_write env ^^
+            (* return word *)
+            get_word ^^
+            G.i (Convert (Wasm_exts.Values.I64 I64Op.ExtendUI32))
+        ))
+    | _ -> assert false
+
+  (* TODO: rewrite using MemoryFill *)
+  (* Zeroes the payload of blob `x`, word by word; the byte length is
+     rounded up to a whole number of words. *)
+  let blob_clear env =
+    Func.share_code1 Func.Always env "blob_clear" ("x", I64Type) [] (fun env get_x ->
+      let (set_ptr, get_ptr) = new_local env "ptr" in
+      let (set_len, get_len) = new_local env "len" in
+      get_x ^^
+      Blob.as_ptr_len env ^^
+      set_len ^^
+      set_ptr ^^
+
+      (* round to word size *)
+      get_len ^^
+      compile_add_const (Int64.sub Heap.word_size 1L) ^^
+      compile_divU_const Heap.word_size ^^
+
+      (* clear all words *)
+      from_0_to_n env (fun get_i ->
+        get_ptr ^^
+        compile_unboxed_const 0L ^^
+        store_unskewed_ptr ^^
+        get_ptr ^^
+        compile_add_const Heap.word_size ^^
+        set_ptr))
+
+  (* Destabilizes stable variables from the legacy Candid layout.
+     Dispatches on the first stable-memory word (the "marker"):
+     - physical size 0: nothing was persisted; yields a record of null fields;
+     - marker = 0: versions 1/2, metadata lives at the end of the last page;
+     - marker <> 0: version 0, the marker itself is the blob length.
+     `save_version` is code that consumes the detected version from the stack. *)
+  let candid_destabilize env ty save_version =
+    match E.mode env with
+    | Flags.ICMode | Flags.RefMode ->
+      let (set_pages, get_pages) = new_local env "pages" in
+      StableMem.stable64_size env ^^
+      set_pages ^^
+
+      get_pages ^^
+      compile_test I64Op.Eqz ^^
+      E.if1 I64Type
+        begin
+          (* Case: Size zero ==> Nothing in stable memory,
+             so result becomes the nil-valued record. *)
+          let (_, fs) = Type.as_obj ty in
+          let fs' = List.map
+           (fun f -> (f.Type.lab, fun () -> Opt.null_lit env))
+           fs
+          in
+          StableMem.get_mem_size env ^^
+          compile_test I64Op.Eqz ^^
+          E.else_trap_with env "StableMem.mem_size non-zero" ^^
+          compile_unboxed_const StableMem.version_stable_heap_no_regions ^^
+          save_version ^^
+          Object.lit_raw env fs'
+        end
+        begin
+          (* Case: Non-zero size. *)
+          let (set_marker, get_marker) = new_local env "marker" in
+          let (set_len, get_len) = new_local env "len" in
+          let (set_offset, get_offset) = new_local env "offset" in
+          compile_unboxed_const 0L ^^
+          read_and_clear_word32 env ^^
+          set_marker ^^
+
+          get_marker ^^
+          compile_test I64Op.Eqz ^^
+          E.if0
+            begin
+              (* Sub-Case: version 1 or 2:
+                 Regions/Experimental API and stable vars. *)
+              (* M = byte offset of the start of the last physical page,
+                 which holds the metadata words. *)
+              let (set_M, get_M) = new_local env "M" in
+              let (set_version, get_version) = new_local env "version" in
+              let (set_N, get_N) = new_local env "N" in
+
+              StableMem.stable64_size env ^^
+              compile_sub_const 1L ^^
+              compile_shl_const (Int64.of_int page_size_bits) ^^
+              set_M ^^
+
+              (* read version *)
+              get_M ^^
+              compile_add_const (Int64.sub page_size 4L) ^^
+              read_and_clear_word32 env ^^
+              set_version ^^
+              get_version ^^
+              save_version ^^
+
+              (* check version *)
+              get_version ^^
+              compile_unboxed_const (StableMem.version_max) ^^
+              compile_comparison I64Op.GtU ^^
+              E.then_trap_with env (Printf.sprintf
+                "higher stable memory version (expected 1..%s)"
+                (Int64.to_string StableMem.version_max)) ^^
+
+              (* restore StableMem bytes [0..4) *)
+              compile_unboxed_const 0L ^^
+              get_M ^^
+              compile_add_const (Int64.sub page_size 8L) ^^
+              read_and_clear_word32 env ^^
+              G.i (Convert (Wasm_exts.Values.I32 I32Op.WrapI64)) ^^
+              write_word32 env ^^
+
+              (* restore mem_size *)
+              get_M ^^
+              compile_add_const (Int64.sub page_size 12L) ^^
+              read_and_clear_word32 env ^^ (*TODO: use 64 bits *)
+              StableMem.set_mem_size env ^^
+
+              (* N = byte offset just after the logical stable memory,
+                 where the stable-variable blob was appended. *)
+              StableMem.get_mem_size env ^^
+              compile_shl_const (Int64.of_int page_size_bits) ^^
+              set_N ^^
+
+              (* set len *)
+              get_N ^^
+              read_and_clear_word32 env ^^
+              set_len ^^
+
+              (* set offset *)
+              get_N ^^
+              compile_add_const 4L ^^
+              set_offset
+            end
+            begin
+              (* Sub-Case: Version 0.
+                 Stable vars with NO Regions/Experimental API. *)
+              (* assert mem_size == 0 *)
+              StableMem.get_mem_size env ^^
+              compile_test I64Op.Eqz ^^
+              E.else_trap_with env "unexpected, non-zero stable memory size" ^^
+
+              (* set len *)
+              get_marker ^^
+              set_len ^^
+
+              (* set offset *)
+              compile_unboxed_const 4L ^^
+              set_offset ^^
+
+              compile_unboxed_const (Int64.of_int 0) ^^
+              save_version
+            end ^^ (* if_ *)
+
+          let (set_blob, get_blob) = new_local env "blob" in
+          (* read blob from stable memory *)
+          Blob.alloc env Tagged.B get_len ^^ set_blob ^^
+          get_blob ^^ Blob.payload_ptr_unskewed env ^^
+          get_offset ^^
+          get_len ^^
+          StableMem.stable64_read env ^^
+
+          let (set_val, get_val) = new_local env "val" in
+          (* deserialize blob to val *)
+          get_blob ^^
+          Bool.lit false ^^ (* can't recover *)
+          Serialization.deserialize_from_blob true env [ty] ^^
+          set_val ^^
+
+          (* clear blob contents *)
+          get_blob ^^
+          blob_clear env ^^
+
+          (* copy zeros from blob to stable memory *)
+          get_offset ^^
+          get_blob ^^ Blob.payload_ptr_unskewed env ^^
+          get_blob ^^ Blob.len env ^^
+          StableMem.stable64_write env ^^
+
+          (* return val *)
+          get_val
+        end
+    | _ -> assert false
+
+  (* Performs the legacy Candid destabilization and accounts the consumed
+     instructions in the upgrade statistics. *)
+  let load env actor_type upgrade_version =
+    candid_destabilize env actor_type upgrade_version ^^
+    UpgradeStatistics.add_instructions env
+end
+
+(* New stable memory layout with dedicated version for enhanced orthogonal persistence.
+   This prevents unwanted forward compatibility of old compiled programs that rely on Candid destabilization.
+   This also helps to detect graph-copy-based destabilization that has priority over enhanced orthogonal persistence.
+  If size == 0: empty
+  let end = physical size * page_size
+  If logical size N > 0:
+    [0..4)          0 (first word is backed up at `end-8`)
+    [4..N)          remaining stable memory data
+    [end-16..end-8) logical stable memory size N (64 bit)
+    [end-8..end-4)  backup of the first word (32 bit)
+    [end-4..end)    stable memory version (32 bit)
+  ending at page boundary
+  Note: The first word must be empty to distinguish this version from the Candid legacy version 0 (which has first word != 0).
+*)
+module NewStableMemory = struct
+  (* Physical stable-memory size in bytes (page count shifted by page size). *)
+  let physical_size env =
+    IC.system_call env "stable64_size" ^^
+    compile_shl_const (Int64.of_int page_size_bits)
+
+  (* Writes the value produced by `get_value` (I32 or I64) at
+     `physical end - offset`. *)
+  let store_at_end env offset typ get_value =
+    physical_size env ^^
+    compile_sub_const offset ^^
+    get_value ^^
+    match typ with
+    | I32Type -> StableMem.write_word32 env
+    | I64Type -> StableMem.write_word64 env
+    | _ -> assert false
+
+  (* Reads an I32 or I64 from `physical end - offset`. *)
+  let read_from_end env offset typ =
+    physical_size env ^^
+    compile_sub_const offset ^^
+    match typ with
+    | I32Type -> StableMem.read_word32 env
+    | I64Type -> StableMem.read_word64 env
+    | _ -> assert false
+
+  (* Zeroes the metadata slot at `physical end - offset`. *)
+  let clear_at_end env offset typ =
+    store_at_end env offset typ 
+    (match typ with
+    | I32Type -> compile_const_32 0l
+    | I64Type -> compile_unboxed_const 0L
+    | _ -> assert false
+    )
+
+  (* Byte offsets of the metadata words, measured back from the physical end:
+     [end-16..end-8) logical size (I64), [end-8..end-4) first-word backup (I32),
+     [end-4..end) version (I32). *)
+  let logical_size_offset = 16L
+  let first_word_backup_offset = 8L
+  let version_offset = 4L
+
+  (* Consumes a version number from the stack and maps the legacy Candid
+     versions to the corresponding stable-heap versions; traps on any other
+     version. *)
+  let upgrade_version_from_candid env =
+    StableMem.set_version env ^^
+    StableMem.get_version env ^^
+    compile_eq_const StableMem.legacy_version_no_stable_memory ^^
+    StableMem.get_version env ^^
+    compile_eq_const StableMem.legacy_version_some_stable_memory ^^
+    G.i (Binary (Wasm_exts.Values.I64 I64Op.Or)) ^^
+    E.if1 I64Type
+    begin
+      compile_unboxed_const StableMem.version_stable_heap_no_regions
+    end
+    begin
+      StableMem.get_version env ^^
+      compile_eq_const StableMem.legacy_version_regions ^^
+      E.else_trap_with env "Unsupported stable memory version when upgrading from Candid stabilization" ^^
+      compile_unboxed_const StableMem.version_stable_heap_regions
+    end ^^
+    StableMem.set_version env
+
+  (* Maps the graph-copy versions to the corresponding stable-heap versions;
+     traps on any other version. *)
+  let upgrade_version_from_graph_stabilization env =
+    StableMem.get_version env ^^
+    compile_eq_const StableMem.version_graph_copy_no_regions ^^
+    E.if1 I64Type
+    begin
+      compile_unboxed_const StableMem.version_stable_heap_no_regions
+    end
+    begin
+      StableMem.get_version env ^^
+      compile_eq_const StableMem.version_graph_copy_regions ^^
+      E.else_trap_with env "Unsupported stable memory version when upgrading from graph-copy-based stabilization" ^^
+      compile_unboxed_const StableMem.version_stable_heap_regions
+    end ^^
+    StableMem.set_version env
+
+  (* Ensures stable memory covers `amount` bytes past the logical size
+     (see StableMem.ensure). *)
+  let grow_size env amount =
+    StableMem.get_mem_size env ^^
+    compile_shl_const (Int64.of_int page_size_bits) ^^
+    compile_unboxed_const amount ^^
+    StableMem.ensure env
+
+  (* Before upgrade: stash logical size, first word, and version at the end of
+     physical stable memory, and zero the first word so this layout remains
+     distinguishable from legacy Candid version 0 (whose first word is <> 0). *)
+  let backup env =
+    let (set_first_word, get_first_word) = new_local32 env "first_word" in
+    physical_size env ^^
+    compile_test I64Op.Eqz ^^
+    E.if0
+      G.nop
+      begin
+        (* read and clear first word *)
+        compile_unboxed_const 0L ^^ StableMem.read_word32 env ^^ set_first_word ^^
+        compile_unboxed_const 0L ^^ compile_const_32 0l ^^ StableMem.write_word32 env ^^
+
+        grow_size env logical_size_offset ^^
+
+        (* backup logical size *)
+        store_at_end env logical_size_offset I64Type (StableMem.get_mem_size env) ^^
+
+        (* backup first word *)
+        store_at_end env first_word_backup_offset I32Type get_first_word ^^
+
+        (* store the version *)
+        store_at_end env version_offset I32Type (StableMem.get_version env ^^ G.i (Convert (Wasm_exts.Values.I32 I32Op.WrapI64)))
+      end
+
+  (* After upgrade: validate the version, restore the logical size and the
+     first word, and clear the metadata slots again. *)
+  let restore env =
+    let (set_first_word, get_first_word) = new_local32 env "first_word" in
+    physical_size env ^^
+    compile_test I64Op.Eqz ^^
+    E.if0
+      begin
+        compile_unboxed_const 0L ^^ StableMem.set_mem_size env
+      end
+      begin
+        (* check the version *)
+        read_from_end env version_offset I32Type ^^
+        G.i (Convert (Wasm_exts.Values.I64 I64Op.ExtendUI32)) ^^
+        StableMem.set_version env ^^
+        StableMem.get_version env ^^
+        compile_eq_const StableMem.version_stable_heap_no_regions ^^
+        StableMem.get_version env ^^
+        compile_eq_const StableMem.version_stable_heap_regions ^^
+        G.i (Binary (Wasm_exts.Values.I64 I64Op.Or)) ^^
+        E.else_trap_with env (Printf.sprintf
+          "unsupported stable memory version (expected %s or %s)"
+           (Int64.to_string StableMem.version_stable_heap_no_regions)
+           (Int64.to_string StableMem.version_stable_heap_regions)) ^^
+
+        (* read first word *)
+        read_from_end env first_word_backup_offset I32Type ^^
+        set_first_word ^^
+
+        (* restore logical size *)
+        read_from_end env logical_size_offset I64Type ^^
+        StableMem.set_mem_size env ^^
+
+        (* clear size and version *)
+        clear_at_end env logical_size_offset I64Type ^^
+        clear_at_end env first_word_backup_offset I32Type ^^
+        clear_at_end env version_offset I32Type ^^
+
+        (* restore first word *)
+        compile_unboxed_const 0L ^^ get_first_word ^^ StableMem.write_word32 env
+      end
+end
+
+(* Enhanced orthogonal persistence *)
+module EnhancedOrthogonalPersistence = struct
+  (* RTS entry points for persisting/recovering the stable actor object. *)
+  let load_stable_actor env = E.call_import env "rts" "load_stable_actor"
+
+  let save_stable_actor env = E.call_import env "rts" "save_stable_actor"
+
+  let free_stable_actor env = E.call_import env "rts" "free_stable_actor"
+
+  (* Pushes two blobs: the Candid type descriptor for the actor type, and the
+     serialized 64-bit offsets of the types inside that descriptor. *)
+  let create_type_descriptor env actor_type =
+    let (candid_type_desc, type_offsets, type_indices) = Serialization.(type_desc env Persistence [actor_type]) in
+    let serialized_offsets = StaticBytes.(as_bytes [i64s (List.map Int64.of_int type_offsets)]) in
+    (* the single actor type must be the first (and only) entry *)
+    assert (type_indices = [0l]);
+    Blob.lit env Tagged.B candid_type_desc ^^
+    Blob.lit env Tagged.B serialized_offsets
+
+  (* Registers the actor's stable type descriptor with the RTS. *)
+  let register_stable_type env actor_type =
+    create_type_descriptor env actor_type ^^
+    E.call_import env "rts" "register_stable_type"
+
+  let load_old_field env field get_old_actor =
+    if field.Type.typ = Type.(Opt Any) then
+      (* A stable variable may have been promoted to type `Any`: Therefore, drop its former content. *)
+      Opt.inject env (Tuple.compile_unit env)
+    else
+      (get_old_actor ^^ Object.load_idx_raw env field.Type.lab)
+
+  (* Fresh actor object with all stable fields initialized to null. *)
+  let empty_actor env actor_type =
+    let (_, field_declarations) = Type.as_obj actor_type in
+    let field_initializers = List.map
+      (fun field -> (field.Type.lab, fun () -> Opt.null_lit env))
+      field_declarations
+    in
+    Object.lit_raw env field_initializers
+
+  (* Support additional fields in an upgraded actor. *)
+  (* Fields present in the old actor are carried over; newly added fields are
+     initialized to null. Consumes the old actor object from the stack. *)
+  let upgrade_actor env actor_type =
+    let set_old_actor, get_old_actor = new_local env "old_actor" in
+    let get_field_value field =
+      get_old_actor ^^
+      Object.contains_field env field.Type.lab ^^
+      (E.if1 I64Type
+        (load_old_field env field get_old_actor)
+        (Opt.null_lit env)
+      ) in
+    let (_, field_declarations) = Type.as_obj actor_type in
+    let field_initializers = List.map
+      (fun field -> (field.Type.lab, fun () -> (get_field_value field)))
+      field_declarations
+    in
+    set_old_actor ^^
+    Object.lit_raw env field_initializers
+
+  (* Recovers the persisted actor, upgrades it to the new actor type, and
+     releases the RTS-side reference. *)
+  let recover_actor env actor_type =
+    load_stable_actor env ^^
+    upgrade_actor env actor_type ^^
+    free_stable_actor env
+
+  (* Pre-upgrade: persist the actor object, back up the stable-memory
+     metadata, and record the instruction counter. *)
+  let save env actor_type =
+    IC.get_actor_to_persist env ^^
+    save_stable_actor env ^^
+    NewStableMemory.backup env ^^
+    UpgradeStatistics.set_instructions env
+
+  (* Post-upgrade: register the (new) stable type, then either recover the
+     persisted actor or, if none exists, produce an empty one. *)
+  let load env actor_type =
+    register_stable_type env actor_type ^^
+    load_stable_actor env ^^
+    compile_test I64Op.Eqz ^^
+    (E.if1 I64Type
+      (empty_actor env actor_type)
+      (recover_actor env actor_type)
+    ) ^^
+    NewStableMemory.restore env ^^
+    UpgradeStatistics.add_instructions env
+
+  (* Initial installation: only the stable type needs registering. *)
+  let initialize env actor_type =
+    register_stable_type env actor_type
+end (* EnhancedOrthogonalPersistence *)
+
+(* As fallback when doing persistent memory layout changes. *)
+module GraphCopyStabilization = struct
+  (* Incremental graph-copy-based (de)stabilization. Each step is a thin
+     wrapper around the corresponding RTS import; the increment functions
+     convert the RTS i32 result to a Motoko boolean. *)
+  let call_rts env name = E.call_import env "rts" name
+
+  let rts_predicate env name = call_rts env name ^^ Bool.from_rts_int32
+
+  (* Whether an incremental graph stabilization has been started. *)
+  let is_graph_stabilization_started env =
+    rts_predicate env "is_graph_stabilization_started"
+
+  (* Begin stabilization, passing the actor's serialized type descriptor. *)
+  let start_graph_stabilization env actor_type =
+    EnhancedOrthogonalPersistence.create_type_descriptor env actor_type ^^
+    call_rts env "start_graph_stabilization"
+
+  (* Run one stabilization increment; yields a boolean result. *)
+  let graph_stabilization_increment env =
+    rts_predicate env "graph_stabilization_increment"
+
+  (* Begin destabilization against the expected actor type. *)
+  let start_graph_destabilization env actor_type =
+    EnhancedOrthogonalPersistence.create_type_descriptor env actor_type ^^
+    call_rts env "start_graph_destabilization"
+
+  (* Run one destabilization increment; yields a boolean result. *)
+  let graph_destabilization_increment env =
+    rts_predicate env "graph_destabilization_increment"
+
+  (* Fetch the destabilized actor and upgrade it to the current actor type. *)
+  let get_graph_destabilized_actor env actor_type =
+    call_rts env "get_graph_destabilized_actor" ^^
+    EnhancedOrthogonalPersistence.upgrade_actor env actor_type
+end
+
+module GCRoots = struct
+  (* Registers all pooled static variables with the RTS as GC roots.
+     Freezes the object pool first so no further constants can be added,
+     then emits a shared function that sizes the RTS-side root array and
+     stores each materialized pool object at its index. *)
+  let register_static_variables env =
+    E.(env.object_pool.frozen) := true;
+    Func.share_code0 Func.Always env "initialize_root_array" [] (fun env ->
+      let pool_size = Int64.of_int (E.object_pool_size env) in
+      compile_unboxed_const pool_size ^^
+      E.call_import env "rts" "initialize_static_variables" ^^
+      E.iterate_object_pool env (fun idx materialize ->
+        compile_unboxed_const (Int64.of_int idx) ^^
+        materialize env ^^
+        E.call_import env "rts" "set_static_variable"))
+end (* GCRoots *)
+
+module StackRep = struct
+  open SR
+
+  (*
+     Most expressions have a “preferred”, most optimal, form. Hence,
+     compile_exp put them on the stack in that form, and also returns
+     the form it chose.
+
+     But the users of compile_exp usually want a specific form as well.
+     So they use compile_exp_as, indicating the form they expect.
+     compile_exp_as then does the necessary coercions.
+   *)
+
+  let of_arity n =
+    if n = 1 then Vanilla else UnboxedTuple n
+
+  (* The stack rep of a primitive type, i.e. what the binary operators expect *)
+  let of_type t =
+    let open Type in
+    match normalize t with
+    | Prim Bool -> SR.bool
+    | Prim (Nat | Int) -> Vanilla
+    | Prim ((Nat8 | Nat16 | Nat32 | Nat64 | Int8 | Int16 | Int32 | Int64 | Char) as pty) -> UnboxedWord64 pty
+    | Prim (Text | Blob | Principal) -> Vanilla
+    | Prim Float -> UnboxedFloat64
+    | Obj (Actor, _) -> Vanilla
+    | Func (Shared _, _, _, _, _) -> Vanilla
+    | p -> todo "StackRep.of_type" (Arrange_ir.typ p) Vanilla
+
+  (* The env looks unused, but will be needed once we can use multi-value, to register
+     the complex types in the environment.
+     For now, multi-value block returns are handled via FakeMultiVal. *)
+  let to_block_type env = function
+    | Vanilla -> [I64Type]
+    | UnboxedWord64 _ -> [I64Type]
+    | UnboxedFloat64 -> [F64Type]
+    | UnboxedTuple n -> Lib.List.make n I64Type
+    | Const _ -> []
+    | Unreachable -> []
+
+  let to_string = function
+    | Vanilla -> "Vanilla"
+    | UnboxedWord64 pty -> prim_fun_name pty "UnboxedWord64"
+    | UnboxedFloat64 -> "UnboxedFloat64"
+    | UnboxedTuple n -> Printf.sprintf "UnboxedTuple %d" n
+    | Unreachable -> "Unreachable"
+    | Const _ -> "Const"
+
+  (* Least upper bound of two stack reps, e.g. for joining the branches of a
+     conditional. Falls back to Vanilla when the reps differ. *)
+  let join (sr1 : t) (sr2 : t) = match sr1, sr2 with
+    | _, _ when SR.eq sr1 sr2 -> sr1
+    | Unreachable, sr2 -> sr2
+    | sr1, Unreachable -> sr1
+
+    | Const _, Const _ -> Vanilla
+    (* use a wildcard here: the previous binding `sr2_` was unused and
+       triggered an unused-variable warning *)
+    | Const _, _ -> sr2
+    | sr1, Const _ -> sr1
+
+    | _, Vanilla -> Vanilla
+    | Vanilla, _ -> Vanilla
+
+    | UnboxedTuple n, UnboxedTuple m when n = m -> sr1
+
+    | _, _ ->
+      Printf.eprintf "Invalid stack rep join (%s, %s)\n"
+        (to_string sr1) (to_string sr2); sr1
+
+  let joins = List.fold_left join Unreachable
+
+  (* Emits code that drops a value of the given rep from the Wasm stack. *)
+  let drop env (sr_in : t) =
+    match sr_in with
+    | Vanilla | UnboxedWord64 _ | UnboxedFloat64 -> G.i Drop
+    | UnboxedTuple n -> G.table n (fun _ -> G.i Drop)
+    | Const _ | Unreachable -> G.nop
+
+  (* Builds the shared (heap or immediate) value of a compile-time constant. *)
+  let rec build_constant env = function
+  | Const.Lit (Const.Vanilla value) -> E.Vanilla value
+  | Const.Lit (Const.Bool number) -> E.Vanilla (Bool.vanilla_lit number)
+  | Const.Lit (Const.Text payload) -> Blob.constant env Tagged.T payload
+  | Const.Lit (Const.Blob payload) -> Blob.constant env Tagged.B payload
+  | Const.Lit (Const.Null) -> E.Vanilla Opt.null_vanilla_lit
+  | Const.Lit (Const.BigInt number) -> BigNum.constant env number
+  | Const.Lit (Const.Word64 (pty, number)) -> BoxedWord64.constant env pty number
+  | Const.Lit (Const.Float64 number) -> Float.constant env number
+  | Const.Opt value -> Opt.constant env (build_constant env value)
+  | Const.Fun (_, get_fi, _) -> Closure.constant env get_fi
+  | Const.Message _ -> assert false
+  | Const.Unit -> E.Vanilla (Tuple.unit_vanilla_lit env)
+  | Const.Tag (tag, value) ->
+      let payload = build_constant env value in
+      Tagged.shared_object env (fun env ->
+        let materialized_payload = Tagged.materialize_shared_value env payload in
+        Variant.inject env tag materialized_payload
+      )
+  | Const.Array elements ->
+      let constant_elements = List.map (build_constant env) elements in
+      Arr.constant env Tagged.I constant_elements
+  | Const.Tuple elements ->
+      let constant_elements = List.map (build_constant env) elements in
+      Arr.constant env Tagged.T constant_elements
+  | Const.Obj fields ->
+      let constant_fields = List.map (fun (name, value) -> (name, build_constant env value)) fields in
+      Object.constant env constant_fields
+
+  (* Emits code that puts the constant's value on the stack. *)
+  let materialize_constant env value =
+    Tagged.materialize_shared_value env (build_constant env value)
+
+  (* Emits coercion code from rep `sr_in` to rep `sr_out`;
+     fails on unsupported conversions. *)
+  let adjust env (sr_in : t) sr_out =
+    if eq sr_in sr_out
+    then G.nop
+    else match sr_in, sr_out with
+    | Unreachable, Unreachable -> G.nop
+    | Unreachable, _ -> G.i Unreachable
+
+    | UnboxedTuple n, Vanilla -> Tuple.from_stack env n
+    | Vanilla, UnboxedTuple n -> Tuple.to_stack env n
+
+    (* BoxedWord64 types *)
+    | UnboxedWord64 (Type.(Int64 | Nat64) as pty), Vanilla ->
+      BoxedWord64.box env pty
+    | Vanilla, UnboxedWord64 (Type.(Int64 | Nat64) as pty) ->
+      BoxedWord64.unbox env pty
+
+    (* TaggedSmallWord types *)
+    | UnboxedWord64 (Type.(Int8 | Nat8 | Int16 | Nat16 | Int32 | Nat32 | Char) as pty), Vanilla ->
+      TaggedSmallWord.tag env pty
+    | Vanilla, UnboxedWord64 (Type.(Nat8 | Int8 | Nat16 | Int16 | Int32 | Nat32 | Char) as pty) ->
+      TaggedSmallWord.untag env pty
+
+    | UnboxedFloat64, Vanilla -> Float.box env
+    | Vanilla, UnboxedFloat64 -> Float.unbox env
+
+    | Const value, Vanilla ->
+        materialize_constant env value
+    | Const Const.Lit (Const.Vanilla n), UnboxedWord64 ty ->
+        compile_unboxed_const n ^^
+        TaggedSmallWord.untag env ty
+    | Const Const.Lit (Const.Word64 (ty1, n)), UnboxedWord64 ty2 when ty1 = ty2 ->
+        compile_unboxed_const n
+    | Const Const.Lit (Const.Float64 f), UnboxedFloat64 -> Float.compile_unboxed_const f
+    (* wildcard instead of the previously unused binding `c` (warning) *)
+    | Const _, UnboxedTuple 0 -> G.nop
+    | Const Const.Tuple cs, UnboxedTuple n ->
+      assert (n = List.length cs);
+      G.concat_map (fun c -> materialize_constant env c) cs
+    | _, _ ->
+      Printf.eprintf "Unknown stack_rep conversion %s -> %s\n"
+        (to_string sr_in) (to_string sr_out);
+      assert false
+
+end (* StackRep *)
+
+module VarEnv = struct
+
+  (* A type to record where Motoko names are stored. *)
+  type varloc =
+    (* A Wasm Local of the current function, directly containing the value,
+       in the given stackrep (Vanilla, UnboxedWord64, …) so far
+       Used for immutable and mutable, non-captured data *)
+    | Local of SR.t * int32
+    (* A Wasm Local of the current function, that points to memory location,
+       which is a MutBox.  Used for mutable captured data *)
+    | HeapInd of int32
+    (* A static variable accessed by an index via the runtime system, refers to a MutBox,
+       belonging to the GC root set *)
+    | Static of int64
+    (* Constant literals can reside in dynamic heap *)
+    | Const of Const.v
+    (* public method *)
+    | PublicMethod of int32 * string
+
+  (* Non-local locations survive into nested function scopes (see mk_fun_ae);
+     locals and heap indirections do not. *)
+  let is_non_local : varloc -> bool = function
+    | Local _
+    | HeapInd _ -> false
+    | Static _
+    | PublicMethod _
+    | Const _ -> true
+
+  type lvl = TopLvl | NotTopLvl
+
+  (*
+  The source variable environment:
+   - Whether we are on the top level
+   - In-scope variables
+   - scope jump labels
+  *)
+
+
+  module NameEnv = Env.Make(String)
+  type t = {
+    lvl : lvl;
+    vars : (varloc * Type.typ) NameEnv.t; (* variables ↦ their location and type *)
+    labels : G.depth NameEnv.t; (* jump label ↦ their depth *)
+  }
+
+  let empty_ae = {
+    lvl = TopLvl;
+    vars = NameEnv.empty;
+    labels = NameEnv.empty;
+  }
+
+  (* Creating a local environment, resetting the local fields,
+     and removing bindings for local variables (unless they are at global locations)
+  *)
+
+  let mk_fun_ae ae = { ae with
+    lvl = NotTopLvl;
+    vars = NameEnv.filter (fun v (l, _) ->
+      let non_local = is_non_local l in
+      (* For debugging, enable this:
+      (if not non_local then Printf.eprintf "VarEnv.mk_fun_ae: Removing %s\n" v);
+      *)
+      non_local
+    ) ae.vars;
+  }
+
+  (* Looks up a variable; logs to stderr (but does not fail) when missing. *)
+  let lookup ae var =
+    match NameEnv.find_opt var ae.vars with
+      | Some e -> Some e
+      | None   -> Printf.eprintf "Could not find %s\n" var; None
+
+  (* Like lookup, but returns the location only, dropping the type. *)
+  let lookup_var ae var =
+    match lookup ae var with
+      | Some (l, _) -> Some l
+      | None -> None
+
+  (* A variable must be captured into a closure iff it lives in a
+     function-local location. *)
+  let needs_capture ae var = match lookup_var ae var with
+    | Some l -> not (is_non_local l)
+    | None -> assert false
+
+  (* Adds a local holding a pointer to a MutBox; returns the extended
+     environment and the fresh local's index. *)
+  let add_local_with_heap_ind env (ae : t) name typ =
+      let i = E.add_anon_local env I64Type in
+      E.add_local_name env i name;
+      ({ ae with vars = NameEnv.add name ((HeapInd i), typ) ae.vars }, i)
+
+  let add_static_variable (ae : t) name index typ =
+      { ae with vars = NameEnv.add name ((Static index), typ) ae.vars }
+
+  let add_local_public_method (ae : t) name (fi, exported_name) typ =
+      { ae with vars = NameEnv.add name ((PublicMethod (fi, exported_name) : varloc), typ) ae.vars }
+
+  let add_local_const (ae : t) name cv typ =
+      { ae with vars = NameEnv.add name ((Const cv : varloc), typ) ae.vars }
+
+  (* Registers an existing Wasm local `i` (with stackrep `sr`) for `name`. *)
+  let add_local_local env (ae : t) name sr i typ =
+      { ae with vars = NameEnv.add name ((Local (sr, i)), typ) ae.vars }
+
+  (* Allocates a fresh Wasm local for `name`; returns environment and index. *)
+  let add_direct_local env (ae : t) name sr typ =
+      let i = E.add_anon_local env (SR.to_var_type sr) in
+      E.add_local_name env i name;
+      (add_local_local env ae name sr i typ, i)
+
+  (* Adds the arguments to the environment, either as Wasm locals (when
+     `as_local name` holds) or as static MutBox variables, and returns the
+     extended environment. *)
+  let rec add_arguments env (ae : t) as_local = function
+    | [] -> ae
+    | ((name, typ) :: remainder) ->
+      if as_local name then
+        let i = E.add_anon_local env I64Type in
+        E.add_local_name env i name;
+        let ae' = { ae with vars = NameEnv.add name ((Local (SR.Vanilla, i)), typ) ae.vars } in
+        add_arguments env ae' as_local remainder
+      else
+        let index = MutBox.add_global_mutbox env in
+        let ae' = add_static_variable ae name index typ in
+        add_arguments env ae' as_local remainder
+
+  let add_argument_locals env (ae : t) =
+    add_arguments env ae (fun _ -> true)
+
+  let add_label (ae : t) name (d : G.depth) =
+      { ae with labels = NameEnv.add name d ae.labels }
+
+  let get_label_depth (ae : t) name : G.depth  =
+    match NameEnv.find_opt name ae.labels with
+      | Some d -> d
+      | None   -> raise (CodegenError (Printf.sprintf "Could not find %s\n" name))
+
+end (* VarEnv *)
+
+(* type for wrapping code with context, context is establishment
+   of (pattern) binding, argument is the code using the binding,
+   result is e.g. the code for `case p e`. *)
+type scope_wrap = G.t -> G.t
+
+let unmodified : scope_wrap = fun code -> code
+
+(* Whether values of `typ` may be represented as heap pointers.
+   Scalar-only types (unit, Null, Bool, Char, 8/16-bit numbers, and None)
+   never point into the heap; a nested optional always counts as a
+   potential pointer (it needs a heap object for ??null, ???null etc.). *)
+let rec can_be_pointer typ nested_optional =
+  let open Type in
+  match normalize typ with
+  | Mut inner -> can_be_pointer inner nested_optional
+  | Opt inner -> nested_optional || can_be_pointer inner true
+  | Prim (Null | Bool | Char | Nat8 | Nat16 | Int8 | Int16) | Non | Tup [] -> false
+  | _ -> true
+
+let potential_pointer typ : bool =
+  (* must not eliminate nested optional types as they refer to a heap object for ??null, ???null etc. *)
+  can_be_pointer typ false
+
+module Var = struct
+  (* This module is all about looking up Motoko variables in the environment,
+     and dealing with mutable variables *)
+
+  open VarEnv
+
+  (* Returns desired stack representation, preparation code and code to consume
+     the value onto the stack *)
+  let set_val env ae var : G.t * SR.t * G.t = match VarEnv.lookup ae var with
+    | Some ((Local (sr, i)), _) ->
+      G.nop,
+      sr,
+      G.i (LocalSet (nr i))
+    | Some ((HeapInd i), typ) when potential_pointer typ ->
+      G.i (LocalGet (nr i)) ^^
+      Tagged.load_forwarding_pointer env ^^
+      compile_add_const ptr_unskew ^^
+      compile_add_const (Int64.mul MutBox.field Heap.word_size),
+      SR.Vanilla,
+      Tagged.write_with_barrier env
+    | Some ((HeapInd i), typ) ->
+      G.i (LocalGet (nr i)),
+      SR.Vanilla,
+      MutBox.store_field env
+    | Some ((Static index), typ) when potential_pointer typ ->
+      Heap.get_static_variable env index ^^
+      Tagged.load_forwarding_pointer env ^^
+      compile_add_const ptr_unskew ^^
+      compile_add_const (Int64.mul MutBox.field Heap.word_size),
+      SR.Vanilla,
+      Tagged.write_with_barrier env
+    | Some ((Static index), typ) ->
+      Heap.get_static_variable env index,
+      SR.Vanilla,
+      MutBox.store_field env
+    | Some ((Const _), _) -> fatal "set_val: %s is const" var
+    | Some ((PublicMethod _), _) -> fatal "set_val: %s is PublicMethod" var
+    | None -> fatal "set_val: %s missing" var
+
+  (* Stores the payload. Returns stack preparation code, and code that consumes the values from the stack *)
+  let set_val_vanilla env ae var : G.t * G.t =
+    let pre_code, sr, code = set_val env ae var in
+    pre_code, StackRep.adjust env SR.Vanilla sr ^^ code
+
+  (* Stores the payload (which is found on the stack, in Vanilla stackrep) *)
+  let set_val_vanilla_from_stack env ae var : G.t =
+    let pre_code, code = set_val_vanilla env ae var in
+    if G.is_nop pre_code
+    then code
+    else
+      (* Need to shuffle the stack entries *)
+      let (set_x, get_x) = new_local env "var_scrut" in
+      set_x ^^
+      pre_code ^^
+      get_x ^^
+      code
+
+  (* Returns the payload (optimized representation) *)
+  let get_val (env : E.t) (ae : VarEnv.t) var = match VarEnv.lookup_var ae var with
+    | Some (Local (sr, i)) ->
+      sr, G.i (LocalGet (nr i))
+    | Some (HeapInd i) ->
+      SR.Vanilla, G.i (LocalGet (nr i)) ^^ MutBox.load_field env
+    | Some (Static index) ->
+      SR.Vanilla, 
+      Heap.get_static_variable env index ^^
+      MutBox.load_field env
+    | Some (Const c) ->
+      SR.Const c, G.nop
+    | Some (PublicMethod (_, name)) ->
+      SR.Vanilla,
+      IC.get_self_reference env ^^
+      IC.actor_public_field env name
+    | None -> assert false
+
+  (* Returns the payload (vanilla representation) *)
+  let get_val_vanilla (env : E.t) (ae : VarEnv.t) var =
+    let rep, load = get_val env ae var in
+    (* Coerce from the optimized representation into Vanilla. *)
+    let to_vanilla = StackRep.adjust env rep SR.Vanilla in
+    load ^^ to_vanilla
+
+  (* Returns the value to put in the closure,
+     and code to restore it, including adding to the environment
+  *)
+  (* Used when building a closure: the first component pushes the captured
+     value (always coerced to Vanilla), the second re-binds the variable in
+     the function's fresh environment and yields a wrapper that, given the
+     body, first stores the value popped from the closure into a new local. *)
+  let capture old_env ae0 var : G.t * (E.t -> VarEnv.t -> VarEnv.t * scope_wrap) =
+    match VarEnv.lookup ae0 var with
+    | Some ((Local (sr, i)), typ) ->
+      ( G.i (LocalGet (nr i)) ^^ StackRep.adjust old_env sr SR.Vanilla
+      , fun new_env ae1 ->
+        (* we use SR.Vanilla in the restored environment. We could use sr;
+           like for parameters hard to predict what’s better *)
+        let ae2, j = VarEnv.add_direct_local new_env ae1 var SR.Vanilla typ in
+        let restore_code = G.i (LocalSet (nr j))
+        in ae2, fun body -> restore_code ^^ body
+      )
+    | Some ((HeapInd i), typ) ->
+      (* Mutable variable: capture the MutBox pointer itself, so the closure
+         shares mutations with the enclosing scope. *)
+      ( G.i (LocalGet (nr i))
+      , fun new_env ae1 ->
+        let ae2, j = VarEnv.add_local_with_heap_ind new_env ae1 var typ in
+        let restore_code = G.i (LocalSet (nr j))
+        in ae2, fun body -> restore_code ^^ body
+      )
+    | _ -> assert false
+
+  (* This is used when putting a mutable field into an object.
+     In the IR, mutable fields of objects are pre-allocated as MutBox objects,
+     to allow the async/await.
+     So we expect the variable to be in a HeapInd (pointer to MutBox on the heap),
+     or Static (static variable represented as a MutBox that is accessed via the 
+     runtime system) and we use the pointer.
+  *)
+  (* Pushes the MutBox pointer (not its contents) for [var]. *)
+  let get_aliased_box env ae var = match VarEnv.lookup_var ae var with
+    | Some (HeapInd i) -> G.i (LocalGet (nr i))
+    | Some (Static index) -> Heap.get_static_variable env index
+    | _ -> assert false
+
+  (* Pops a MutBox pointer from the stack into the local backing [var].
+     Only HeapInd variables can be re-aliased this way. *)
+  let capture_aliased_box env ae var = match VarEnv.lookup_var ae var with
+    | Some (HeapInd i) ->
+      G.i (LocalSet (nr i))
+    | _ -> assert false
+
+end (* Var *)
+
+(* Calling well-known prelude functions *)
+(* FIXME: calling into the prelude will not work if we ever need to compile a program
+   that requires top-level cps conversion;
+   use new prims instead *)
+module Internals = struct
+  (* Invokes a prelude function known to be a compile-time constant function.
+     The callee takes a closure argument, which is irrelevant for these
+     top-level functions, so a zero dummy is passed. *)
+  let call_prelude_function env ae var =
+    match VarEnv.lookup_var ae var with
+    | Some (VarEnv.Const Const.Fun (_, mk_fi, _)) ->
+       compile_unboxed_zero ^^ (* A dummy closure *)
+       G.i (Call (nr (mk_fi())))
+    | _ -> assert false
+
+  (* Cycle-accounting helpers defined in the prelude. *)
+  let add_cycles env ae = call_prelude_function env ae "@add_cycles"
+  let reset_cycles env ae = call_prelude_function env ae "@reset_cycles"
+  let reset_refund env ae = call_prelude_function env ae "@reset_refund"
+end
+
+(* This comes late because it also deals with messages *)
+module FuncDec = struct
+  (* Binds function arguments [args] to consecutive Wasm locals starting at
+     index [first_arg], extending the environment [ae0]. *)
+  let bind_args env ae0 first_arg args =
+    let rec go i ae = function
+    | [] -> ae
+    | a::args ->
+      (* Function arguments are always vanilla, due to subtyping and uniform representation.
+         We keep them as such here for now. We _could_ always unpack those that can be unpacked
+         (Nat32 etc.). It is generally hard to predict which strategy is better. *)
+      let ae' = VarEnv.add_local_local env ae a.it SR.Vanilla (Int32.of_int i) a.note in
+      go (i+1) ae' args in
+    go first_arg ae0 args
+
+  (* Create a WebAssembly func from a pattern (for the argument) and the body.
+   Parameter `captured` should contain the, well, captured local variables that
+   the function will find in the closure. *)
+  (* The generated function receives the closure as its first (index 0)
+     parameter, followed by the user-level arguments; [restore_env] unpacks
+     the captured variables from the closure into fresh locals. *)
+  let compile_local_function outer_env outer_ae restore_env args mk_body ret_tys at =
+    let arg_names = List.map (fun a -> a.it, I64Type) args in
+    let return_arity = List.length ret_tys in
+    let retty = Lib.List.make return_arity I64Type in
+    let ae0 = VarEnv.mk_fun_ae outer_ae in
+    Func.of_body outer_env (["clos", I64Type] @ arg_names) retty (fun env -> G.with_region at (
+      (* Resolve the forwarding pointer first (incremental GC may have moved the closure). *)
+      let get_closure = G.i (LocalGet (nr 0l)) ^^ Tagged.load_forwarding_pointer env in
+
+      let ae1, closure_codeW = restore_env env ae0 get_closure in
+
+      (* Add arguments to the environment (shifted by 1) *)
+      let ae2 = bind_args env ae1 1 args in
+
+      closure_codeW (mk_body env ae2)
+    ))
+
+  (* Lifecycle transition performed at the start of a message handler,
+     depending on the sharing mode of the message. *)
+  let message_start env sort = match sort with
+      | Type.(Shared Write) ->
+        Lifecycle.(trans env InUpdate)
+      | Type.(Shared Query) ->
+        Lifecycle.(trans env InQuery)
+      | Type.(Shared Composite) ->
+        Lifecycle.(trans env InComposite)
+      | _ -> assert false
+
+  (* Lifecycle transition (and GC) performed when a message handler finishes. *)
+  let message_cleanup env sort = match sort with
+      | Type.(Shared Write) ->
+        (* During incremental (de)stabilization, skip GC and keep the state;
+           otherwise collect garbage and return to Idle. *)
+        Lifecycle.get env ^^
+        compile_eq_const (Lifecycle.(int_of_state InStabilization)) ^^
+        Lifecycle.get env ^^
+        compile_eq_const (Lifecycle.(int_of_state InDestabilization)) ^^
+        G.i (Binary (Wasm_exts.Values.I64 I64Op.Or)) ^^
+        E.if0
+          G.nop
+          begin
+            GC.collect_garbage env ^^
+            Lifecycle.(trans env Idle)
+          end
+      | Type.Shared Type.Query ->
+        Lifecycle.(trans env PostQuery)
+      | Type.Shared Type.Composite ->
+        (* Stay in composite query state such that callbacks of 
+        composite queries can also use the memory reserve. 
+        The state is isolated since memory changes of queries 
+        are rolled back by the IC runtime system. *)
+        Lifecycle.(trans env InComposite)
+      | _ -> assert false
+
+  (* Start of a reply/reject callback: composite-query callbacks stay in
+     InComposite; all others behave like an update message start. *)
+  let callback_start env =
+    Lifecycle.(is_in env InComposite) ^^
+    E.if0
+      (G.nop)
+      (message_start env Type.(Shared Write))
+
+  (* Matching cleanup for callback_start (no-op while in composite queries). *)
+  let callback_cleanup env =
+    Lifecycle.(is_in env InComposite) ^^
+    E.if0
+      (G.nop)
+      (message_cleanup env (Type.Shared Type.Write))
+
+  (* Compiles a shared (message) function body into a nullary Wasm function:
+     sets up the lifecycle state, resets cycle bookkeeping, deserializes the
+     Candid argument into locals, runs the body, and cleans up. *)
+  let compile_const_message outer_env outer_ae sort control args mk_body ret_tys at : E.func_with_names =
+    let ae0 = VarEnv.mk_fun_ae outer_ae in
+    Func.of_body outer_env [] [] (fun env -> G.with_region at (
+      message_start env sort ^^
+      (* cycles *)
+      Internals.reset_cycles env outer_ae ^^
+      Internals.reset_refund env outer_ae ^^
+      (* reply early for a oneway *)
+      (if control = Type.Returns
+       then
+         Tuple.compile_unit env ^^
+         Serialization.serialize env [] ^^
+         IC.reply_with_data env
+       else G.nop) ^^
+      (* Deserialize argument and add params to the environment *)
+      let arg_list = List.map (fun a -> (a.it, a.note)) args in
+      let arg_names = List.map (fun a -> a.it) args in
+      let arg_tys = List.map (fun a -> a.note) args in
+      let ae1 = VarEnv.add_argument_locals env ae0 arg_list in
+      Serialization.deserialize env arg_tys ^^
+      (* Deserialized values are on the stack in order; pop into locals in reverse. *)
+      G.concat_map (Var.set_val_vanilla_from_stack env ae1) (List.rev arg_names) ^^
+      mk_body env ae1 ^^
+      message_cleanup env sort
+    ))
+
+  (* Compile a closed function declaration (captures no local variables) *)
+  (* Returns the constant describing the function plus a fill action that
+     compiles the body later (so mutually recursive definitions work). *)
+  let closed pre_env sort control name args mk_body fun_rhs ret_tys at =
+    if Type.is_shared_sort sort
+    then begin
+      let (fi, fill) = E.reserve_fun pre_env name in
+      ( Const.Message fi, fun env ae ->
+        fill (compile_const_message env ae sort control args mk_body ret_tys at)
+      )
+    end else begin
+      assert (control = Type.Returns);
+      (* Local functions are compiled lazily: only allocated if actually used. *)
+      let lf = E.make_lazy_function pre_env name in
+      let fun_id = E.get_constant_function_id pre_env in
+      ( Const.Fun (fun_id, (fun () -> Lib.AllocOnUse.use lf), fun_rhs), fun env ae ->
+        let restore_no_env _env ae _ = ae, unmodified in
+        Lib.AllocOnUse.def lf (lazy (compile_local_function env ae restore_no_env args mk_body ret_tys at))
+      )
+    end
+
+  (* Compile a closure declaration (captures local variables) *)
+  (* Emits the function body (with code to restore captured variables from the
+     closure) and, at the declaration site, heap-allocates the closure object:
+     [funptr; length; captured values...]. Returns (SR.Vanilla, code pushing
+     the closure pointer). *)
+  let closure env ae sort control name captured args mk_body ret_tys at =
+      let is_local = sort = Type.Local in
+
+      let set_clos, get_clos = new_local env (name ^ "_clos") in
+
+      let len = Wasm.I64.of_int_u (List.length captured) in
+      (* Build, in one pass over [captured], both the code that stores each
+         captured value into closure slot i and the (composed) restore action
+         that loads slot i back into a fresh local inside the function. *)
+      let store_env, restore_env =
+        let rec go i = function
+          | [] -> (G.nop, fun _env ae1 _ -> ae1, unmodified)
+          | (v::vs) ->
+              let store_rest, restore_rest = go (i + 1) vs in
+              let store_this, restore_this = Var.capture env ae v in
+              let store_env =
+                get_clos ^^
+                store_this ^^
+                Closure.store_data env (Wasm.I64.of_int_u i) ^^
+                store_rest in
+              let restore_env env ae1 get_env =
+                let ae2, codeW = restore_this env ae1 in
+                let ae3, code_restW = restore_rest env ae2 get_env in
+                (ae3,
+                 fun body ->
+                 get_env ^^
+                 Closure.load_data env (Wasm.I64.of_int_u i) ^^
+                 codeW (code_restW body)
+                )
+              in store_env, restore_env in
+        go 0 captured in
+
+      let f =
+        if is_local
+        then compile_local_function env ae restore_env args mk_body ret_tys at
+        else assert false (* no first class shared functions yet *) in
+
+      let fi = E.add_fun env name f in
+
+      let code =
+        (* Allocate a heap object for the closure *)
+        Tagged.alloc env (Int64.add Closure.header_size len) Tagged.Closure ^^
+        set_clos ^^
+
+        (* Store the function pointer number: *)
+        get_clos ^^
+        compile_unboxed_const (Wasm.I64_convert.extend_i32_u (E.add_fun_ptr env fi)) ^^
+        Tagged.store_field env Closure.funptr_field ^^
+
+        (* Store the length *)
+        get_clos ^^
+        compile_unboxed_const len ^^
+        Tagged.store_field env Closure.len_field ^^
+
+        (* Store all captured values *)
+        store_env ^^
+
+        (* Publish the object to the GC once fully initialized. *)
+        get_clos ^^
+        Tagged.allocation_barrier env ^^
+        G.i Drop
+      in
+
+      if is_local
+      then
+        SR.Vanilla,
+        code ^^
+        get_clos
+      else assert false (* no first class shared functions *)
+
+  (* Compiles a function literal: a capture-free function becomes a constant,
+     otherwise a heap-allocated closure. *)
+  let lit env ae name sort control free_vars args mk_body ret_tys at =
+    let captured = List.filter (VarEnv.needs_capture ae) free_vars in
+
+    (* Top-level functions can never capture locals. *)
+    if ae.VarEnv.lvl = VarEnv.TopLvl then assert (captured = []);
+
+    if captured = []
+    then
+      let (ct, fill) = closed env sort control name args mk_body Const.Complicated ret_tys at in
+      fill env ae;
+      (SR.Const ct, G.nop)
+    else closure env ae sort control name captured args mk_body ret_tys at
+
+  (* Returns a closure corresponding to a future (async block) *)
+  let async_body env ae ts free_vars mk_body at =
+    (* We compile this as a local, returning function, so set return type to [] *)
+    let sr, code = lit env ae "anon_async" Type.Local Type.Returns free_vars [] mk_body [] at in
+    code ^^
+    StackRep.adjust env sr SR.Vanilla
+
+  (* Takes the reply and reject callbacks, tuples them up (with administrative extras),
+     adds them to the continuation table, and returns the two callbacks expected by
+     ic.call_new.
+
+     The tupling is necessary because we want to free _both_/_all_ closures
+     when the call is answered.
+
+     The reply callback function exists once per type (as it has to do
+     deserialization); the reject callback function is unique.
+  *)
+
+  (* [ts_opt = Some ts]: deserialize the reply as values of types ts.
+     [ts_opt = None]: "raw" mode — hand the reply bytes to the continuation
+     as a single blob. *)
+  let closures_to_reply_reject_callbacks_aux env ts_opt =
+    let arity, reply_name, from_arg_data =
+      match ts_opt with
+      | Some ts ->
+        (List.length ts,
+         "@callback<" ^ Typ_hash.typ_hash (Type.Tup ts) ^ ">",
+         fun env -> Serialization.deserialize env ts)
+      | None ->
+        (1,
+         "@callback",
+         (fun env ->
+           Blob.of_size_copy env Tagged.B
+           (fun env -> 
+            IC.system_call env "msg_arg_data_size")
+           (fun env -> 
+            IC.system_call env "msg_arg_data_copy")
+           (fun env -> compile_unboxed_const 0L)))
+    in
+    (* Reply callback: env parameter is the continuation-table index. *)
+    Func.define_built_in env reply_name ["env", I64Type] [] (fun env ->
+        callback_start env ^^
+        (* Look up continuation *)
+        let (set_closure, get_closure) = new_local env "closure" in
+        G.i (LocalGet (nr 0l)) ^^
+        ContinuationTable.recall env ^^
+        Arr.load_field env 0L ^^ (* get the reply closure *)
+        set_closure ^^
+        get_closure ^^
+        Closure.prepare_closure_call env ^^
+
+        (* Deserialize/Blobify reply arguments  *)
+        from_arg_data env ^^
+
+        get_closure ^^
+        Closure.call_closure env arity 0 ^^
+
+        callback_cleanup env
+      );
+
+    (* Reject callback: shared across all types, passes the error text. *)
+    let reject_name = "@reject_callback" in
+    Func.define_built_in env reject_name ["env", I64Type] [] (fun env ->
+        callback_start env ^^
+        (* Look up continuation *)
+        let (set_closure, get_closure) = new_local env "closure" in
+        G.i (LocalGet (nr 0l)) ^^
+        ContinuationTable.recall env ^^
+        Arr.load_field env 1L ^^ (* get the reject closure *)
+        set_closure ^^
+        get_closure ^^
+        Closure.prepare_closure_call env ^^
+        (* Synthesize value of type `Text`, the error message
+           (The error code is fetched via a prim)
+        *)
+        IC.error_value env ^^
+
+        get_closure ^^
+        Closure.call_closure env 1 0 ^^
+
+        callback_cleanup env
+      );
+
+    (* result is a function that accepts a list of closure getters, from which
+       the first and second must be the reply and reject continuations. *)
+    fun closure_getters ->
+      let set_cb_index, get_cb_index = new_local env "cb_index" in
+      Arr.lit env Tagged.T closure_getters ^^
+      ContinuationTable.remember env ^^
+      set_cb_index ^^
+
+      (* return arguments for the ic.call *)
+      compile_unboxed_const (Wasm.I64_convert.extend_i32_u (E.add_fun_ptr env (E.built_in env reply_name))) ^^
+      get_cb_index ^^
+      compile_unboxed_const (Wasm.I64_convert.extend_i32_u (E.add_fun_ptr env (E.built_in env reject_name))) ^^
+      get_cb_index
+
+  (* Typed variant: the reply is deserialized at types [ts]. *)
+  let closures_to_reply_reject_callbacks env ts =
+    closures_to_reply_reject_callbacks_aux env (Some ts)
+  (* Raw variant: the reply bytes are passed as a blob. *)
+  let closures_to_raw_reply_reject_callbacks env  =
+    closures_to_reply_reject_callbacks_aux env None
+
+  let ignoring_callback env =
+    (* for one-way calls, we use an invalid table entry as the callback. this
+       way, the callback, when it comes back, will (safely) trap, even if the
+       module has completely changed in between. This way, one-way calls do not
+       get in the way of safe instantaneous upgrades *)
+    compile_unboxed_const 0xFFFF_FFFFL (* IC does not support -1 in 64-bit, thus use zero-extended 32-bit -1 *)
+
+  (* Defines (once) and returns the function pointer of the cleanup callback,
+     which invokes the cleanup closure stored at tuple index 2. *)
+  let cleanup_callback env =
+    let name = "@cleanup_callback" in
+    Func.define_built_in env name ["env", I64Type] [] (fun env ->
+        G.i (LocalGet (nr 0l)) ^^
+        ContinuationTable.recall env ^^
+        Arr.load_field env 2L ^^ (* get the cleanup closure *)
+        let set_closure, get_closure = new_local env "closure" in
+        set_closure ^^ get_closure ^^
+        Closure.prepare_closure_call env ^^
+        get_closure ^^
+        Closure.call_closure env 0 0);
+    compile_unboxed_const (Wasm.I64_convert.extend_i32_u (E.add_fun_ptr env (E.built_in env name)))
+
+  (* Common skeleton for inter-canister calls: sets up ic0.call_new with the
+     given method pair, continuations, payload and cycles, performs the call,
+     and records/handles failure of call_perform. *)
+  let ic_call_threaded env purpose get_meth_pair push_continuations
+    add_data add_cycles =
+    match E.mode env with
+    | Flags.ICMode
+    | Flags.RefMode ->
+      let message = Printf.sprintf "could not perform %s" purpose in
+      let (set_cb_index, get_cb_index) = new_local env "cb_index" in
+      (* The callee *)
+      get_meth_pair ^^ Arr.load_field env 0L ^^ Blob.as_ptr_len env ^^
+      (* The method name *)
+      get_meth_pair ^^ Arr.load_field env 1L ^^ Blob.as_ptr_len env ^^
+      (* The reply and reject callback *)
+      push_continuations ^^
+      set_cb_index ^^ get_cb_index ^^
+      (* initiate call *)
+      IC.system_call env "call_new" ^^
+      cleanup_callback env ^^ 
+      get_cb_index ^^
+      IC.system_call env "call_on_cleanup" ^^
+      (* the data *)
+      add_data get_cb_index ^^
+      IC.system_call env "call_data_append" ^^
+      (* the cycles *)
+      add_cycles ^^
+      (* done! *)
+      IC.system_call env "call_perform" ^^
+      G.i (Convert (Wasm_exts.Values.I64 I64Op.ExtendUI32)) ^^
+      IC.set_call_perform_status env ^^
+      Blob.lit env Tagged.T message ^^
+      IC.set_call_perform_message env ^^
+      IC.get_call_perform_status env ^^
+      compile_unboxed_const 0L ^^
+      compile_comparison I64Op.Ne ^^
+      (* save error code, cleanup on error *)
+      E.if0
+      begin (* send failed *)
+        if !Flags.trap_on_call_error then
+          E.trap_with env message
+        else
+        (* Recall (don't leak) continuations *)
+        get_cb_index ^^
+        ContinuationTable.recall env ^^
+        G.i Drop
+      end
+      begin (* send succeeded *)
+        G.nop
+      end
+    | _ ->
+      E.trap_with env (Printf.sprintf "cannot perform %s when running locally" purpose)
+
+  (* Typed call: serializes the argument at ts1, deserializes the reply at ts2.
+     Note: partially applied — the caller still supplies add_cycles. *)
+  let ic_call env ts1 ts2 get_meth_pair get_arg get_k get_r get_c =
+    ic_call_threaded
+      env
+      "remote call"
+      get_meth_pair
+      (closures_to_reply_reject_callbacks env ts2 [get_k; get_r; get_c])
+      (fun _ -> get_arg ^^ Serialization.serialize env ts1)
+
+  (* Raw call: the argument is a pre-serialized blob; the reply is a blob. *)
+  let ic_call_raw env get_meth_pair get_arg get_k get_r get_c =
+    ic_call_threaded
+      env
+      "raw call"
+      get_meth_pair
+      (closures_to_raw_reply_reject_callbacks env [get_k; get_r; get_c])
+      (fun _ -> get_arg ^^ Blob.as_ptr_len env)
+
+  (* Self-call used for async scheduling: the payload is just the
+     continuation-table index, so the callee can resume the future. *)
+  let ic_self_call env ts get_meth_pair get_future get_k get_r get_c =
+    ic_call_threaded
+      env
+      "self call"
+      get_meth_pair
+      (* Storing the tuple away, future_array_index = 3, keep in sync with rts/continuation_table.rs *)
+      (closures_to_reply_reject_callbacks env ts [get_k; get_r; get_c; get_future])
+      (fun get_cb_index ->
+        get_cb_index ^^
+        TaggedSmallWord.msb_adjust Type.Nat32 ^^
+        Serialization.serialize env Type.[Prim Nat32])
+
+  (* One-way call: no reply/reject continuations are registered (the
+     callbacks are invalid table entries, see ignoring_callback); the
+     call_perform status is recorded rather than awaited. *)
+  let ic_call_one_shot env ts get_meth_pair get_arg add_cycles =
+    match E.mode env with
+    | Flags.ICMode
+    | Flags.RefMode ->
+      (* The callee *)
+      get_meth_pair ^^ Arr.load_field env 0L ^^ Blob.as_ptr_len env ^^
+      (* The method name *)
+      get_meth_pair ^^ Arr.load_field env 1L ^^ Blob.as_ptr_len env ^^
+      (* The reply callback *)
+      ignoring_callback env ^^
+      compile_unboxed_const 0L ^^
+      (* The reject callback *)
+      ignoring_callback env ^^
+      compile_unboxed_const 0L ^^
+      IC.system_call env "call_new" ^^
+      (* the data *)
+      get_arg ^^ Serialization.serialize env ts ^^
+      IC.system_call env "call_data_append" ^^
+      (* the cycles *)
+      add_cycles ^^
+      IC.system_call env "call_perform" ^^
+      G.i (Convert (Wasm_exts.Values.I64 I64Op.ExtendUI32)) ^^
+      (* This is a one-shot function: just remember error code *)
+      (if !Flags.trap_on_call_error then
+         (* legacy: discard status, proceed as if all well *)
+         G.i Drop ^^
+         compile_unboxed_zero ^^
+         IC.set_call_perform_status env ^^
+         Blob.lit env Tagged.T "" ^^
+         IC.set_call_perform_message env
+       else
+         IC.set_call_perform_status env ^^
+         Blob.lit env Tagged.T "could not perform oneway" ^^
+         IC.set_call_perform_message env)
+
+    | _ -> assert false
+
+  (* Structural equality of two method references (actor blob, method name):
+     expects both pairs on the stack, leaves a Bool. Short-circuits on the
+     actor comparison. *)
+  let equate_msgref env =
+    let (set_meth_pair1, get_meth_pair1) = new_local env "meth_pair1" in
+    let (set_meth_pair2, get_meth_pair2) = new_local env "meth_pair2" in
+    set_meth_pair2 ^^ set_meth_pair1 ^^
+    get_meth_pair1 ^^ Arr.load_field env 0L ^^
+    get_meth_pair2 ^^ Arr.load_field env 0L ^^
+    Blob.compare env (Some Operator.EqOp) ^^
+    E.if1 I64Type
+    begin
+      get_meth_pair1 ^^ Arr.load_field env 1L ^^
+      get_meth_pair2 ^^ Arr.load_field env 1L ^^
+      Blob.compare env (Some Operator.EqOp)
+    end
+    begin
+      Bool.lit false
+    end
+
+  (* Exports the canister_update method used by self-calls to resume futures:
+     only callable by the canister itself; looks up the future's closure in
+     the continuation table by the deserialized index and runs it. *)
+  let export_async_method env =
+    let name = IC.async_method_name in
+    begin match E.mode env with
+    | Flags.ICMode | Flags.RefMode ->
+      Func.define_built_in env name [] [] (fun env ->
+        let (set_closure, get_closure) = new_local env "closure" in
+
+        message_start env (Type.Shared Type.Write) ^^
+
+        (* Check that we are calling this *)
+        IC.assert_caller_self env ^^
+
+        (* Deserialize and look up continuation argument *)
+        Serialization.deserialize env Type.[Prim Nat32] ^^
+        TaggedSmallWord.lsb_adjust Type.Nat32 ^^
+        ContinuationTable.peek_future env ^^
+        set_closure ^^
+        get_closure ^^
+        Closure.prepare_closure_call env ^^
+        get_closure ^^
+        Closure.call_closure env 0 0 ^^
+        message_cleanup env (Type.Shared Type.Write)
+      );
+
+      let fi = E.built_in env name in
+      E.add_export env (nr {
+        name = Lib.Utf8.decode ("canister_update " ^ name);
+        edesc = nr (FuncExport (nr fi))
+      })
+    | _ -> ()
+    end
+
+  (* Exports a canister_update method that forces a garbage collection.
+     Deliberately allocation-free (no arg deserialization, static reply) so
+     it still works under memory pressure. *)
+  let export_gc_trigger_method env =
+    let name = IC.gc_trigger_method_name in
+    begin match E.mode env with
+    | Flags.ICMode | Flags.RefMode ->
+      Func.define_built_in env name [] [] (fun env ->
+        (* The GC trigger is also blocked during incremental (de)stabilization. 
+           This is checked in `Lifecycle.trans` being called by `message_start` *)
+        message_start env (Type.Shared Type.Write) ^^
+        (* Check that we are called from this or a controller, w/o allocation *)
+        IC.assert_caller_self_or_controller env ^^
+        (* To avoid more failing allocation, don't deserialize args nor serialize reply,
+           i.e. don't even try to do this:
+        Serialization.deserialize env [] ^^
+        Tuple.compile_unit env ^^
+        Serialization.serialize env [] ^^
+        *)
+        (* Instead, just ignore the argument and
+           send a *statically* allocated, nullary reply *)
+        IC.static_nullary_reply env ^^
+        (* Finally, act like
+        message_cleanup env (Type.Shared Type.Write)
+           but *force* collection *)
+        GC.record_mutator_instructions env ^^
+        E.collect_garbage env true ^^
+        GC.record_collector_instructions env ^^
+        Lifecycle.trans env Lifecycle.Idle
+      );
+
+      let fi = E.built_in env name in
+      E.add_export env (nr {
+        name = Lib.Utf8.decode ("canister_update " ^ name);
+        edesc = nr (FuncExport (nr fi))
+      })
+    | _ -> ()
+    end
+
+  (* Exports two functions queried by the runtime system: the instruction
+     budget per (de)stabilization increment and the stable-memory access
+     limit, each depending on whether we are inside an explicit upgrade. *)
+  let export_stabilization_limits env =
+    let moc_stabilization_instruction_limit_fi = 
+      E.add_fun env "moc_stabilization_instruction_limit" (
+        Func.of_body env [] [I64Type] (fun env ->
+          (* To use the instruction budget well during upgrade, 
+             offer the entire upgrade instruction limit for the destabilization, 
+             since the stabilization can also be run before the upgrade. *)
+          Lifecycle.during_explicit_upgrade env ^^
+          E.if1 I64Type
+            (compile_unboxed_const (Int64.of_int Flags.(!stabilization_instruction_limit.update_call)))
+            (compile_unboxed_const (Int64.of_int Flags.(!stabilization_instruction_limit.upgrade)))
+        )
+      ) in
+    E.add_export env (nr {
+      name = Lib.Utf8.decode "moc_stabilization_instruction_limit";
+      edesc = nr (FuncExport (nr moc_stabilization_instruction_limit_fi))
+    });
+    let moc_stable_memory_access_limit_fi = 
+      E.add_fun env "moc_stable_memory_access_limit" (
+        Func.of_body env [] [I64Type] (fun env ->
+          Lifecycle.during_explicit_upgrade env ^^
+          E.if1 I64Type
+            (compile_unboxed_const (Int64.of_int Flags.(!stable_memory_access_limit.update_call)))
+            (compile_unboxed_const (Int64.of_int Flags.(!stable_memory_access_limit.upgrade)))
+        )
+      ) in
+    E.add_export env (nr {
+      name = Lib.Utf8.decode "moc_stable_memory_access_limit";
+      edesc = nr (FuncExport (nr moc_stable_memory_access_limit_fi))
+    }) 
+
+end (* FuncDec *)
+
+module IncrementalGraphStabilization = struct
+  (* Two mutable i64 globals track the progress of incremental graph-copy
+     (de)stabilization: a completion flag and the destabilized actor pointer
+     (0 while destabilization is still in progress). *)
+  let register_globals env =
+    E.add_global64 env "__stabilization_completed" Mutable 0L;
+    E.add_global64 env "__destabilized_actor" Mutable 0L
+
+  let is_stabilization_completed env =
+    G.i (GlobalGet (nr (E.get_global env "__stabilization_completed")))
+  let set_stabilization_completed env =
+    G.i (GlobalSet (nr (E.get_global env "__stabilization_completed")))
+
+  let get_destabilized_actor env =
+    G.i (GlobalGet (nr (E.get_global env "__destabilized_actor")))
+  let set_destabilized_actor env =
+    G.i (GlobalSet (nr (E.get_global env "__destabilized_actor")))
+
+  let async_stabilization_method_name = "@motoko_async_stabilization"
+
+  (* Function-pointer table indices for the stabilization callbacks. *)
+  let async_stabilization_reply_callback_name = "@async_stabilization_reply_callback"
+  let async_stabilization_reply_callback env =
+    Int64.of_int32 (E.add_fun_ptr env (E.built_in env async_stabilization_reply_callback_name))
+
+  let async_stabilization_reject_callback_name = "@async_stabilization_reject_callback"
+  let async_stabilization_reject_callback env =
+    Int64.of_int32 (E.add_fun_ptr env (E.built_in env async_stabilization_reject_callback_name))
+
+  (* Self-call to the async stabilization method, driving one more increment.
+     The callback "env" arguments are unused, hence the 0L payloads. *)
+  let call_async_stabilization env =
+    IC.get_self_reference env ^^ Blob.as_ptr_len env ^^
+    Blob.lit_ptr_len env Tagged.T async_stabilization_method_name ^^
+    compile_unboxed_const (async_stabilization_reply_callback env) ^^ compile_unboxed_const 0L ^^
+    compile_unboxed_const (async_stabilization_reject_callback env) ^^ compile_unboxed_const 0L ^^
+    IC.system_call env "call_new" ^^
+    IC.system_call env "call_perform" ^^
+    G.i (Convert (Wasm_exts.Values.I64 I64Op.ExtendUI32)) ^^
+    E.then_trap_with env "Async stabilization increment call failed" 
+
+  (* Reply callback of the stabilization self-call chain: either finish with
+     a static reply or kick off the next increment. *)
+  let define_async_stabilization_reply_callback env =
+    Func.define_built_in env async_stabilization_reply_callback_name ["env", I64Type] [] (fun env ->
+      is_stabilization_completed env ^^
+      E.if0
+        begin
+          (* Successful completion of the async stabilization sequence. *)
+          IC.static_nullary_reply env
+          (* Skip garbage collection. *)
+          (* Stay in lifecycle state `InStabilization`. *)
+        end
+        begin
+          (* Trigger next async stabilization increment. *)
+          call_async_stabilization env
+        end)
+  
+  (* Reject callback: propagate the error message to the original caller. *)
+  let define_async_stabilization_reject_callback env =
+    Func.define_built_in env async_stabilization_reject_callback_name ["env", I64Type] [] (fun env ->
+      IC.error_message env ^^
+      Blob.as_ptr_len env ^^
+      IC.system_call env "msg_reject")
+
+  (* Exports the update method that performs one stabilization increment per
+     call; only the canister itself or a controller may invoke it. *)
+  let export_async_stabilization_method env =
+    let name = async_stabilization_method_name in
+    begin match E.mode env with
+    | Flags.ICMode | Flags.RefMode ->
+      Func.define_built_in env name [] [] (fun env ->
+        IC.assert_caller_self_or_controller env ^^
+        (* All messages are blocked except this method and the upgrade. *)
+        Lifecycle.trans env Lifecycle.InStabilization ^^
+        (* Skip argument deserialization to avoid allocations. *)
+        GraphCopyStabilization.graph_stabilization_increment env ^^
+        set_stabilization_completed env ^^
+        IC.static_nullary_reply env
+        (* Skip garbage collection. *)
+        (* Stay in lifecycle state `InStabilization`. *)
+      );
+
+      let fi = E.built_in env name in
+      E.add_export env (nr {
+        name = Lib.Utf8.decode ("canister_update " ^ name);
+        edesc = nr (FuncExport (nr fi))
+      })
+    | _ -> ()
+    end
+
+  (* Begins graph-copy stabilization (idempotent: no-op if already started).
+     The GC is stopped for the duration of the stabilization. *)
+  let start_graph_stabilization env actor_type =
+    GraphCopyStabilization.is_graph_stabilization_started env ^^
+    (E.if0
+      G.nop
+      begin
+        (* Extra safety measure stopping the GC during incremental stabilization, 
+           although it should not be called in lifecycle state `InStabilization`. *)
+        E.call_import env "rts" "stop_gc_before_stabilization" ^^
+        IC.get_actor_to_persist env ^^
+        GraphCopyStabilization.start_graph_stabilization env actor_type
+      end)
+
+  (* Exports `__motoko_stabilize_before_upgrade`: enters InStabilization,
+     starts the graph copy, and launches the async increment chain. *)
+  let export_stabilize_before_upgrade_method env actor_type =
+    let name = "__motoko_stabilize_before_upgrade" in
+    begin match E.mode env with
+    | Flags.ICMode | Flags.RefMode ->
+      Func.define_built_in env name [] [] (fun env ->
+        IC.assert_caller_self_or_controller env ^^
+        (* All messages are blocked except this method and the upgrade. *)
+        Lifecycle.trans env Lifecycle.InStabilization ^^
+        start_graph_stabilization env actor_type ^^
+        call_async_stabilization env
+        (* Stay in lifecycle state `InStabilization`. *)
+      );
+
+      let fi = E.built_in env name in
+      E.add_export env (nr {
+        name = Lib.Utf8.decode ("canister_update " ^ name);
+        edesc = nr (FuncExport (nr fi))
+      })
+    | _ -> ()
+    end
+
+  (* Runs during the upgrade itself: loops over stabilization increments
+     until graph_stabilization_increment reports completion. *)
+  let complete_stabilization_on_upgrade env actor_type =
+    start_graph_stabilization env actor_type ^^
+    G.loop0
+    begin
+      GraphCopyStabilization.graph_stabilization_increment env ^^
+      E.if0 
+        G.nop
+        (* Not done yet: branch back to the loop header. *)
+        (G.i (Br (nr 1l)))
+    end
+  
+  
+  let async_destabilization_method_name = "@motoko_async_destabilization"
+
+  (* Function-pointer table indices for the destabilization callbacks. *)
+  let async_destabilization_reply_callback_name = "@async_destabilization_reply_callback"
+  let async_destabilization_reply_callback env =
+    Int64.of_int32 (E.add_fun_ptr env (E.built_in env async_destabilization_reply_callback_name))
+
+  let async_destabilization_reject_callback_name = "@async_destabilization_reject_callback"
+  let async_destabilization_reject_callback env =
+    Int64.of_int32 (E.add_fun_ptr env (E.built_in env async_destabilization_reject_callback_name))
+
+  (* Self-call to the async destabilization method, driving one more increment.
+     The callback "env" arguments are unused, hence the 0L payloads. *)
+  let call_async_destabilization env =
+    IC.get_self_reference env ^^ Blob.as_ptr_len env ^^
+    Blob.lit_ptr_len env Tagged.T async_destabilization_method_name ^^
+    compile_unboxed_const (async_destabilization_reply_callback env) ^^ compile_unboxed_const 0L ^^
+    compile_unboxed_const (async_destabilization_reject_callback env) ^^ compile_unboxed_const 0L ^^
+    IC.system_call env "call_new" ^^
+    IC.system_call env "call_perform" ^^
+    G.i (Convert (Wasm_exts.Values.I64 I64Op.ExtendUI32)) ^^
+    E.then_trap_with env "Async destabilization increment call failed"
+
+  (* Final step of destabilization: install the main actor, re-enable the GC
+     and unblock message processing. *)
+  let complete_graph_destabilization env = 
+    IC.initialize_main_actor env ^^
+    (* Allow other messages and allow garbage collection. *)
+    E.call_import env "rts" "start_gc_after_destabilization" ^^
+    Lifecycle.trans env Lifecycle.Idle
+
+  (* Reply callback of the destabilization self-call chain: keep iterating
+     while the destabilized actor pointer is still 0 (i.e. not finished). *)
+  let define_async_destabilization_reply_callback env =
+    Func.define_built_in env async_destabilization_reply_callback_name ["env", I64Type] [] (fun env ->
+      get_destabilized_actor env ^^
+      compile_test I64Op.Eqz ^^
+      E.if0
+        begin
+          (* Trigger next async destabilization increment. *)
+          call_async_destabilization env
+        end
+        begin
+          (* Send static reply of successful async destabilization sequence. *)
+          IC.static_nullary_reply env
+          (* Stay in lifecycle state `InDestabilization`. *)
+        end)
+  
+  (* Reject callback: propagate the error message to the original caller. *)
+  let define_async_destabilization_reject_callback env =
+    Func.define_built_in env async_destabilization_reject_callback_name ["env", I64Type] [] (fun env ->
+      IC.error_message env ^^
+      Blob.as_ptr_len env ^^
+      IC.system_call env "msg_reject")
+
+  (* Performs one destabilization increment unless already finished; on the
+     final increment, stores the destabilized actor and completes. *)
+  let destabilization_increment env actor_type =
+    get_destabilized_actor env ^^
+    compile_test I64Op.Eqz ^^
+    (E.if0
+      begin
+        GraphCopyStabilization.graph_destabilization_increment env ^^
+        (E.if0
+          begin
+            (* Destabilization completed in this increment. *)
+            GraphCopyStabilization.get_graph_destabilized_actor env actor_type ^^
+            set_destabilized_actor env ^^
+            complete_graph_destabilization env
+          end
+          G.nop)
+      end
+      G.nop)
+
+  (* Exports the update method that performs one destabilization increment
+     per call; only the canister itself or a controller may invoke it. *)
+  let export_async_destabilization_method env actor_type =
+    let name = async_destabilization_method_name in
+    begin match E.mode env with
+    | Flags.ICMode | Flags.RefMode ->
+      Func.define_built_in env name [] [] (fun env ->
+        IC.assert_caller_self_or_controller env ^^
+        (* Stay in lifecycle state `InDestabilization` if not yet completed. *)
+        destabilization_increment env actor_type ^^
+        IC.static_nullary_reply env
+      );
+
+      let fi = E.built_in env name in
+      E.add_export env (nr {
+        name = Lib.Utf8.decode ("canister_update " ^ name);
+        edesc = nr (FuncExport (nr fi))
+      })
+    | _ -> ()
+    end
+
+  (* On upgrade from a graph-copy-stabilized canister: run the first
+     destabilization increment synchronously. If it does not complete,
+     remain in `InDestabilization`, which blocks all messages except the
+     explicit continuation method exported below. *)
+  let partial_destabilization_on_upgrade env actor_type =
+    (* TODO: Verify that the post_upgrade hook cannot be directly called by the IC *)
+    (* Garbage collection is disabled in `start_graph_destabilization` until destabilization has completed. *)
+    GraphCopyStabilization.start_graph_destabilization env actor_type ^^
+    get_destabilized_actor env ^^
+    compile_test I64Op.Eqz ^^
+    E.if0
+      begin
+        destabilization_increment env actor_type ^^
+        get_destabilized_actor env ^^
+        (E.if0
+          G.nop
+          begin
+            (* All messages remain blocked except this method. *)
+            Lifecycle.trans env Lifecycle.InDestabilization
+            (* Since the canister initialization cannot perform async calls, the destabilization 
+               needs to be explicitly continued by calling `__motoko_destabilize_after_upgrade`. *)
+          end)
+      end
+      G.nop
+
+  (* Export `__motoko_destabilize_after_upgrade`: the user-visible update
+     method that continues a pending destabilization via self-calls.
+     Only emitted for IC/Ref compilation modes. *)
+  let export_destabilize_after_upgrade_method env =
+    let name = "__motoko_destabilize_after_upgrade" in
+    begin match E.mode env with
+    | Flags.ICMode | Flags.RefMode ->
+      Func.define_built_in env name [] [] (fun env ->
+        (* All messages are blocked except this method. *)
+        IC.assert_caller_self_or_controller env ^^
+        (* Skip argument deserialization to avoid allocations. *)
+        call_async_destabilization env
+        (* Stay in lifecycle state `InDestabilization`. *)
+      );
+
+      let fi = E.built_in env name in
+      E.add_export env (nr {
+        name = Lib.Utf8.decode ("canister_update " ^ name);
+        edesc = nr (FuncExport (nr fi))
+      })
+    | _ -> ()
+    end
+  
+  (* Push the destabilized actor on the stack; traps if destabilization has
+     not completed yet (the user must call `__motoko_destabilize_after_upgrade`). *)
+  let load env =
+    get_destabilized_actor env ^^
+    compile_test I64Op.Eqz ^^
+    E.then_trap_with env "Destabilization is not yet completed: Call __motoko_destabilize_after_upgrade" ^^
+    get_destabilized_actor env
+    (* Upgrade costs are already recorded in RTS for graph-copy-based (de-)stabilization. *)
+
+  (* Register all built-in functions and exports needed for incremental
+     graph-copy stabilization and destabilization. *)
+  let define_methods env actor_type =
+    define_async_stabilization_reply_callback env;
+    define_async_stabilization_reject_callback env;
+    export_async_stabilization_method env;
+    export_stabilize_before_upgrade_method env actor_type;
+    define_async_destabilization_reply_callback env;
+    define_async_destabilization_reject_callback env;
+    export_async_destabilization_method env actor_type;
+    export_destabilize_after_upgrade_method env;
+
+end (* IncrementalGraphStabilization *)
+
+(* Dispatch between the supported persistence formats on canister
+   initialization/upgrade: enhanced orthogonal persistence (EOP),
+   graph-copy-based stabilization, and the legacy Candid-based
+   stabilization, based on the stable memory version. *)
+module Persistence = struct
+  (* Stable memory version at the time of the canister upgrade or initialization.
+     This version can be different to `StableMem.get_version` because the upgrade logic
+     may update the stable memory version, e.g. lift to enhanced orthogonal persistence. *)
+  let register_globals env =
+    E.add_global64 env "__persistence_version" Mutable 0L;
+    E.add_global64 env "__init_message_payload" Mutable 0L
+
+  let get_persistence_version env =
+    G.i (GlobalGet (nr (E.get_global env "__persistence_version")))
+  let set_persistence_version env =
+    G.i (GlobalSet (nr (E.get_global env "__persistence_version")))
+
+  (* No GC running during destabilization while this global blob reference is used. *)
+  let get_init_message_payload env =
+    G.i (GlobalGet (nr (E.get_global env "__init_message_payload")))
+  let set_init_message_payload env =
+    G.i (GlobalSet (nr (E.get_global env "__init_message_payload")))
+
+  (* True iff the stored version denotes legacy Candid-based stabilization. *)
+  let use_candid_destabilization env =
+    get_persistence_version env ^^
+    compile_unboxed_const StableMem.legacy_version_regions ^^ (* Version 0 to 2 *)
+    compile_comparison I64Op.LeU
+
+  (* True iff the stored version denotes graph-copy stabilization
+     (with or without regions). *)
+  let use_graph_destabilization env =
+    get_persistence_version env ^^
+    compile_eq_const StableMem.version_graph_copy_no_regions ^^
+    get_persistence_version env ^^
+    compile_eq_const StableMem.version_graph_copy_regions ^^
+    G.i (Binary (Wasm_exts.Values.I64 I64Op.Or))
+
+  (* True iff the stored version denotes enhanced orthogonal persistence
+     (with or without regions). *)
+  let use_enhanced_orthogonal_persistence env =
+    get_persistence_version env ^^
+    compile_eq_const StableMem.version_stable_heap_no_regions ^^
+    get_persistence_version env ^^
+    compile_eq_const StableMem.version_stable_heap_regions ^^
+    G.i (Binary (Wasm_exts.Values.I64 I64Op.Or))
+
+  (* Read and record the persistence version; for graph-copy upgrades start
+     (partial) destabilization, otherwise initialize a fresh main actor. *)
+  let initialize env actor_type =
+    E.call_import env "rts" "read_persistence_version" ^^
+    set_persistence_version env ^^
+    use_graph_destabilization env ^^
+    E.if0
+      begin
+        IncrementalGraphStabilization.partial_destabilization_on_upgrade env actor_type
+        (* Potentially stay in lifecycle state `InDestabilization` *)
+      end
+      begin
+        IC.initialize_main_actor env ^^
+        Lifecycle.trans env Lifecycle.Idle
+      end
+
+  (* Load the actor according to the recorded persistence version, lifting
+     graph-copy and legacy Candid formats to EOP. Traps on unknown versions. *)
+  let load env actor_type =
+    use_enhanced_orthogonal_persistence env ^^
+    (E.if1 I64Type
+      (EnhancedOrthogonalPersistence.load env actor_type)
+      begin
+        use_graph_destabilization env ^^
+        E.if1 I64Type
+          begin
+            IncrementalGraphStabilization.load env ^^
+            NewStableMemory.upgrade_version_from_graph_stabilization env ^^
+            EnhancedOrthogonalPersistence.initialize env actor_type 
+          end
+          begin
+            use_candid_destabilization env ^^
+            E.else_trap_with env "Unsupported persistence version. Use newer Motoko compiler version." ^^
+            OldStabilization.load env actor_type (NewStableMemory.upgrade_version_from_candid env) ^^
+            EnhancedOrthogonalPersistence.initialize env actor_type
+          end
+      end) ^^
+    StableMem.region_init env
+
+  (* On upgrade: either complete an explicitly started graph stabilization,
+     or persist via enhanced orthogonal persistence. *)
+  let save env actor_type =
+    GraphCopyStabilization.is_graph_stabilization_started env ^^
+    E.if0
+      (IncrementalGraphStabilization.complete_stabilization_on_upgrade env actor_type)
+      (EnhancedOrthogonalPersistence.save env actor_type)
+end (* Persistence *)
+
+module PatCode = struct
+  (* Pattern failure code on demand.
+
+  Patterns in general can fail, so we want a block around them with a
+  jump-label for the fail case. But many patterns cannot fail, in particular
+  function arguments that are simple variables. In these cases, we do not want
+  to create the block and the (unused) jump label. So we first generate the
+  code, either as plain code (CannotFail) or as code with hole for code to run
+  in case of failure (CanFail).
+  *)
+
+  type patternCode =
+    | CannotFail of G.t
+    | CanFail of (G.t -> G.t)
+
+  (* The pattern that always fails: runs the failure continuation directly. *)
+  let definiteFail = CanFail (fun fail -> fail)
+
+  (* Sequential composition; the result can fail iff either side can fail. *)
+  let (^^^) : patternCode -> patternCode -> patternCode = function
+    | CannotFail is1 ->
+      begin function
+      | CannotFail is2 -> CannotFail (is1 ^^ is2)
+      | CanFail is2 -> CanFail (fun k -> is1 ^^ is2 k)
+      end
+    | CanFail is1 ->
+      begin function
+      | CannotFail is2 -> CanFail (fun k ->  is1 k ^^ is2)
+      | CanFail is2 -> CanFail (fun k -> is1 k ^^ is2 k)
+      end
+
+  (* Plug the given failure code into the hole (if any). *)
+  let with_fail (fail_code : G.t) : patternCode -> G.t = function
+    | CannotFail is -> is
+    | CanFail is -> is fail_code
+
+  (* Alternative: try the first pattern; on its failure fall through to the
+     second. Implemented with an inner depth label: the first pattern's
+     failure branch pushes `false` and jumps out, success pushes `true`;
+     the following `if` then dispatches to the second pattern as needed. *)
+  let orElse : patternCode -> patternCode -> patternCode = function
+    | CannotFail is1 -> fun _ -> CannotFail is1
+    | CanFail is1 -> function
+      | CanFail is2 -> CanFail (fun fail_code ->
+          let inner_fail = G.new_depth_label () in
+          let inner_fail_code = Bool.lit false ^^ G.branch_to_ inner_fail in
+          G.labeled_block1 I64Type inner_fail (is1 inner_fail_code ^^ Bool.lit true) ^^
+          E.if0 G.nop (is2 fail_code)
+        )
+      | CannotFail is2 -> CannotFail (
+          let inner_fail = G.new_depth_label () in
+          let inner_fail_code = Bool.lit false ^^ G.branch_to_ inner_fail in
+          G.labeled_block1 I64Type inner_fail (is1 inner_fail_code ^^ Bool.lit true) ^^
+          E.if0 G.nop is2
+        )
+
+  (* Fold a list of alternatives, with an explicit final fallback. *)
+  let orElses : patternCode list -> patternCode -> patternCode =
+    List.fold_right orElse
+
+  let patternFailTrap env = E.trap_with env "pattern failed"
+
+  (* Close a pattern with the default behavior: trap on failure. *)
+  let orPatternFailure env pcode =
+    with_fail (patternFailTrap env) pcode
+
+  let orsPatternFailure env pcodes =
+    orPatternFailure env (orElses pcodes definiteFail)
+
+  (* Attach a source region to the generated code for error reporting. *)
+  let with_region at = function
+    | CannotFail is -> CannotFail (G.with_region at is)
+    | CanFail is -> CanFail (fun k -> G.with_region at (is k))
+
+end (* PatCode *)
+open PatCode
+
+(* All the code above is independent of the IR *)
+open Ir
+
+module AllocHow = struct
+  (*
+  When compiling a (recursive) block, we need to do a dependency analysis, to
+  find out how the things are allocated. The options are:
+  - const:  completely known, constant, not stored anywhere (think static function)
+            (no need to mention in a closure)
+  - local:  only needed locally, stored in a Wasm local, immutable
+            (can be copied into a closure by value)
+  - local mutable: only needed locally, stored in a Wasm local, mutable
+            (cannot be copied into a closure)
+  - heap allocated: stored on the dynamic heap, address in Wasm local
+            (can be copied into a closure by reference)
+  - static heap: stored on the static heap, address known statically
+            (no need to mention in a closure)
+
+  The goal is to avoid dynamic allocation where possible (and use locals), and
+  to avoid turning function references into closures.
+
+  The rules are:
+  - functions are const, unless they capture something that is not a const
+    function or a static heap allocation.
+    in particular, top-level functions are always const
+  - everything that is captured on the top-level needs to be statically
+    heap-allocated
+  - everything that is captured before it is defined, or is captured and mutable
+    needs to be dynamically heap-allocated
+  - the rest can be local
+  *)
+
+  module M = Freevars.M
+  module S = Freevars.S
+
+  (*
+  We represent this as a lattice as follows:
+  *)
+  type how = Const | LocalImmut of SR.t | LocalMut of SR.t | StoreHeap | StoreStatic
+  type allocHow = how M.t
+
+  (* Union of maps with disjoint domains; duplicate keys are a compiler bug. *)
+  let disjoint_union : allocHow -> allocHow -> allocHow =
+    M.union (fun v _ _ -> fatal "AllocHow.disjoint_union: %s" v)
+
+  (* Pointwise least upper bound in the lattice above.
+     StoreStatic and StoreHeap are incomparable — joining them is a bug. *)
+  let join : allocHow -> allocHow -> allocHow =
+    M.union (fun _ x y -> Some (match x, y with
+      | StoreStatic, StoreHeap | StoreHeap, StoreStatic
+      ->  fatal "AllocHow.join: cannot join StoreStatic and StoreHeap"
+
+      | _, StoreHeap     | StoreHeap,      _ -> StoreHeap
+      | _, StoreStatic   | StoreStatic,    _ -> StoreStatic
+      | _, LocalMut sr   | LocalMut sr,    _ -> LocalMut sr
+      | _, LocalImmut sr | LocalImmut sr,  _ -> LocalImmut sr
+
+      | Const, Const -> Const
+    ))
+  let joins = List.fold_left join M.empty
+
+  let map_of_set = Freevars.map_of_set
+  let set_of_map = Freevars.set_of_map
+
+  (* Various filters used in the set operations below *)
+  let is_local_mut _ = function
+    | LocalMut _ -> true
+    | _ -> false
+
+  let is_local _ = function
+    | LocalImmut _ | LocalMut _ -> true
+    | _ -> false
+
+  let how_captured lvl how seen captured =
+    (* What to do so that we can capture something?
+       * For local blocks, put on the dynamic heap:
+         - mutable things
+         - not yet defined things
+       * For top-level blocks, put on the static heap:
+         - everything that is non-static (i.e. still in locals)
+    *)
+    match lvl with
+    | VarEnv.NotTopLvl ->
+      map_of_set StoreHeap (S.union
+        (S.inter (set_of_map (M.filter is_local_mut how)) captured)
+        (S.inter (set_of_map (M.filter is_local how)) (S.diff captured seen))
+      )
+    | VarEnv.TopLvl ->
+      map_of_set StoreStatic
+        (S.inter (set_of_map (M.filter is_local how)) captured)
+
+  (* A bit like StackRep.of_type, but only for those types and stackreps that
+     we support in local variables *)
+  let stackrep_of_type t =
+    let open Type in
+    match normalize t with
+    | Prim ((Nat64 | Int64 | Nat32 | Int32 | Nat16 | Int16 | Nat8 | Int8 | Char) as pty) ->
+       SR.UnboxedWord64 pty
+    | Prim Float -> SR.UnboxedFloat64
+    | _ -> SR.Vanilla
+
+  (* One step of the analysis: fold a single declaration into (seen, how0),
+     adding requirements for the names it defines and for whatever it captures. *)
+  let dec lvl how_outer (seen, how0) dec =
+    let how_all = disjoint_union how_outer how0 in
+
+    let (f,d) = Freevars.dec dec in
+    let captured = S.inter (set_of_map how0) (Freevars.captured_vars f) in
+
+    (* Which allocation is required for the things defined here? *)
+    let how1 = match dec.it with
+      (* Mutable variables are, well, mutable *)
+      | VarD _ ->
+        M.map (fun t -> LocalMut (stackrep_of_type t)) d
+
+      (* Constant expressions (trusting static_vals.ml) *)
+      | LetD (_, e) when e.note.Note.const ->
+        M.map (fun _ -> (Const : how)) d
+
+      (* References to mutboxes *)
+      | RefD _ ->
+        M.map (fun _ -> StoreHeap) d
+
+      (* Everything else needs at least a local *)
+      | _ ->
+        M.map (fun t -> LocalImmut (stackrep_of_type t)) d in
+
+    (* Which allocation does this require for its captured things? *)
+    let how2 = how_captured lvl how_all seen captured in
+
+    let how = joins [how0; how1; how2] in
+    let seen' = S.union seen (set_of_map d)
+    in (seen', how)
+
+  (* find the allocHow for the variables currently in scope *)
+  (* we assume things are mutable, as we do not know better here *)
+  let how_of_ae ae : allocHow =
+    M.map (fun (l, _) -> match l with
+    | VarEnv.Const _        -> (Const : how)
+    | VarEnv.Static _       -> StoreStatic
+    | VarEnv.HeapInd _      -> StoreHeap
+    | VarEnv.Local (sr, _)  -> LocalMut sr (* conservatively assume mutable *)
+    | VarEnv.PublicMethod _ -> LocalMut SR.Vanilla
+    ) ae.VarEnv.vars
+
+  (* Compute the allocation strategy for a whole declaration block by
+     iterating `dec` to a fixed point (joins only move up the finite
+     lattice, so this terminates). *)
+  let decs (ae : VarEnv.t) decs captured_in_body : allocHow =
+    let lvl = ae.VarEnv.lvl in
+    let how_outer = how_of_ae ae in
+    let defined_here = snd (Freevars.decs decs) in (* TODO: implement gather_decs more directly *)
+    let how_outer = Freevars.diff how_outer defined_here in (* shadowing *)
+    let how0 = M.map (fun _t -> (Const : how)) defined_here in
+    let captured = S.inter (set_of_map defined_here) captured_in_body in
+    let rec go how =
+      let seen, how1 = List.fold_left (dec lvl how_outer) (S.empty, how) decs in
+      assert (S.equal seen (set_of_map defined_here));
+      let how2 = how_captured lvl how1 seen captured in
+      let how' = join how1 how2 in
+      if M.equal (=) how how' then how' else go how' in
+    go how0
+
+  (* Functions to extend the environment (and possibly allocate memory)
+     based on how we want to store them. *)
+  let add_local env ae how name typ : VarEnv.t * G.t =
+    match M.find name how with
+    | (Const : how) -> (ae, G.nop)
+    | LocalImmut sr | LocalMut sr ->
+      let ae1, _ = VarEnv.add_direct_local env ae name sr typ in
+      (ae1, G.nop)
+    | StoreHeap ->
+      let ae1, i = VarEnv.add_local_with_heap_ind env ae name typ in
+      let alloc_code = MutBox.alloc env ^^ G.i (LocalSet (nr i)) in
+      (ae1, alloc_code)
+    | StoreStatic ->
+      let index = MutBox.add_global_mutbox env in
+      let ae1 = VarEnv.add_static_variable ae name index typ in
+      (ae1, G.nop)
+
+  (* Aliases (RefD) must refer to an existing mutbox, hence always StoreHeap. *)
+  let add_local_for_alias env ae how name typ : VarEnv.t * G.t =
+    match M.find name how with
+    | StoreHeap ->
+      let ae1, _ = VarEnv.add_local_with_heap_ind env ae name typ in
+      ae1, G.nop
+    | _ -> assert false
+
+end (* AllocHow *)
+
+(* The actual compiler code that looks at the AST *)
+
+(* wraps a bigint in range [0…2^64-1] into range [-2^63…2^63-1] *)
+let nat64_to_int64 n =
+  (* Reinterpret the Nat64 bit pattern as a signed two's-complement value:
+     anything at or above 2^63 wraps down by 2^64. *)
+  let open Big_int in
+  if lt_big_int n (power_int_positive_int 2 63)
+  then n
+  else sub_big_int n (power_int_positive_int 2 64)
+
+(* Translate an IR literal into its compile-time constant representation.
+   Small word types become tagged vanilla values; 64-bit types keep a raw
+   word; Int/Nat become bignums; Nat64 is wrapped into the signed int64
+   range first (see `nat64_to_int64`). *)
+let const_lit_of_lit : Ir.lit -> Const.lit = function
+  | BoolLit b     -> Const.Bool b
+  | IntLit n
+  | NatLit n      -> Const.BigInt (Numerics.Nat.to_big_int n)
+  | Int8Lit n     -> Const.Vanilla (TaggedSmallWord.vanilla_lit Type.Int8 (Numerics.Int_8.to_int n))
+  | Nat8Lit n     -> Const.Vanilla (TaggedSmallWord.vanilla_lit Type.Nat8 (Numerics.Nat8.to_int n))
+  | Int16Lit n    -> Const.Vanilla (TaggedSmallWord.vanilla_lit Type.Int16 (Numerics.Int_16.to_int n))
+  | Nat16Lit n    -> Const.Vanilla (TaggedSmallWord.vanilla_lit Type.Nat16 (Numerics.Nat16.to_int n))
+  | Int32Lit n    -> Const.Vanilla (TaggedSmallWord.vanilla_lit Type.Int32 (Numerics.Int_32.to_int n))
+  | Nat32Lit n    -> Const.Vanilla (TaggedSmallWord.vanilla_lit Type.Nat32 (Numerics.Nat32.to_int n))
+  | Int64Lit n    -> Const.Word64 (Type.Int64, (Big_int.int64_of_big_int (Numerics.Int_64.to_big_int n)))
+  | Nat64Lit n    -> Const.Word64 (Type.Nat64, (Big_int.int64_of_big_int (nat64_to_int64 (Numerics.Nat64.to_big_int n))))
+  | CharLit c     -> Const.Vanilla (TaggedSmallWord.vanilla_lit Type.Char c)
+  | NullLit       -> Const.Null
+  | TextLit t     -> Const.Text t
+  | BlobLit t     -> Const.Blob t
+  | FloatLit f    -> Const.Float64 f
+
+(* Wrap a literal as a `Const.Lit` constant value. *)
+let const_of_lit lit =
+  Const.Lit (const_lit_of_lit lit)
+
+(* Literals compile to no code at all: just a Const stack representation. *)
+let compile_lit lit =
+  SR.Const (const_of_lit lit), G.nop
+
+(* Compile a literal and adjust it to the requested stack representation. *)
+let compile_lit_as env sr_out lit =
+  let sr_in, code = compile_lit lit in
+  code ^^ StackRep.adjust env sr_in sr_out
+
+(* helper, traps with message *)
+let then_arithmetic_overflow env =
+  E.then_trap_with env "arithmetic overflow"
+
+(* The first returned StackRep is for the arguments (expected), the second for the results (produced) *)
+(* Compile a unary operator at type `t`.
+   Returns (argument stackrep, result stackrep, code). *)
+let compile_unop env t op =
+  let open Operator in
+  match op, t with
+  | _, Type.Non ->
+    SR.Vanilla, SR.Unreachable, G.i Unreachable
+  | NegOp, Type.(Prim Int) ->
+    SR.Vanilla, SR.Vanilla,
+    BigNum.compile_neg env
+  | NegOp, Type.(Prim ((Int8 | Int16 | Int32 | Int64) as p)) ->
+    StackRep.of_type t, StackRep.of_type t,
+    (* Negation of the minimum value would overflow: trap on min_int64
+       (this is the bit pattern for all msb-aligned fixed-width ints),
+       otherwise compute 0 - n. *)
+    Func.share_code1 Func.Never env (prim_fun_name p "neg_trap") ("n", I64Type) [I64Type] (fun env get_n ->
+      get_n ^^
+      compile_eq_const 0x8000_0000_0000_0000L ^^
+      then_arithmetic_overflow env ^^
+      compile_unboxed_zero ^^
+      get_n ^^
+      G.i (Binary (Wasm_exts.Values.I64 I64Op.Sub))
+    )
+  | NegOp, Type.(Prim Float) ->
+    SR.UnboxedFloat64, SR.UnboxedFloat64,
+    G.i (Unary (Wasm_exts.Values.F64 F64Op.Neg))
+  | NotOp, Type.(Prim (Nat64|Int64 as p)) ->
+     SR.UnboxedWord64 p, SR.UnboxedWord64 p,
+     compile_xor_const (-1L)
+  | NotOp, Type.(Prim (Nat8|Nat16|Nat32|Int8|Int16|Int32 as ty)) ->
+     StackRep.of_type t, StackRep.of_type t,
+     (* Only flip the payload bits of the msb-aligned small word. *)
+     compile_unboxed_const (TaggedSmallWord.mask_of_type ty) ^^
+     G.i (Binary (Wasm_exts.Values.I64 I64Op.Xor))
+  | _ ->
+    todo "compile_unop"
+      (Wasm.Sexpr.Node ("BinOp", [ Arrange_ops.unop op ]))
+      (SR.Vanilla, SR.Unreachable, E.trap_with env "TODO: compile_unop")
+
+(* Logarithmic helpers for deciding whether we can carry out operations in constant bitwidth *)
+
+(* helper, traps with message *)
+(* helper, traps with message *)
+let else_arithmetic_overflow env =
+  E.else_trap_with env "arithmetic overflow"
+
+(* helpers to decide if Int64 arithmetic can be carried out on the fast path *)
+(* `(a xor (a shl 1)) shru 63` is 0 iff bits 62 and 63 of `a` agree,
+   i.e. |a| < 2^62. If that holds for both operands, 64-bit add/sub
+   cannot overflow, so take the fast path; otherwise fall back to `slow`. *)
+let additiveInt64_shortcut fast env get_a get_b slow =
+  get_a ^^ get_a ^^ compile_shl_const 1L ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.Xor)) ^^ compile_shrU_const 63L ^^
+  get_b ^^ get_b ^^ compile_shl_const 1L ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.Xor)) ^^ compile_shrU_const 63L ^^
+  G.i (Binary (Wasm_exts.Values.I64 I64Op.Or)) ^^
+  compile_test I64Op.Eqz ^^
+  E.if1 I64Type
+    (get_a ^^ get_b ^^ fast)
+    slow
+
+(* `clz (x xor (x shl 1))` counts the leading bits that agree with the sign
+   bit, i.e. measures how few significant bits `x` has. If the two counts
+   sum to at least 65, the signed product fits in 64 bits — fast path. *)
+let mulInt64_shortcut fast env get_a get_b slow =
+  get_a ^^ get_a ^^ compile_shl_const 1L ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.Xor)) ^^ G.i (Unary (Wasm_exts.Values.I64 I64Op.Clz)) ^^
+  get_b ^^ get_b ^^ compile_shl_const 1L ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.Xor)) ^^ G.i (Unary (Wasm_exts.Values.I64 I64Op.Clz)) ^^
+  G.i (Binary (Wasm_exts.Values.I64 I64Op.Add)) ^^
+  compile_unboxed_const 65L ^^ compile_comparison I64Op.GeU ^^
+  E.if1 I64Type
+    (get_a ^^ get_b ^^ fast)
+    slow
+
+(* Fast-path estimator for Int64 exponentiation. Handles the trivial bases
+   and exponents directly, then estimates the result's bit width (significant
+   bits of `a` times `b`); only if it provably fits 63 bits is `fast` taken. *)
+let powInt64_shortcut fast env get_a get_b slow =
+  get_b ^^ compile_test I64Op.Eqz ^^
+  E.if1 I64Type
+    (compile_unboxed_const 1L) (* ^0 *)
+    begin (* ^(1+n) *)
+      get_a ^^ compile_unboxed_const (-1L) ^^ compile_comparison I64Op.Eq ^^
+      E.if1 I64Type
+        begin (* -1 ** (1+exp) == if even (1+exp) then 1 else -1 *)
+          get_b ^^ compile_unboxed_const 1L ^^
+          G.i (Binary (Wasm_exts.Values.I64 I64Op.And)) ^^ compile_test I64Op.Eqz ^^
+          E.if1 I64Type
+            (compile_unboxed_const 1L)
+            get_a
+        end
+        begin
+          get_a ^^ compile_shrS_const 1L ^^
+          compile_test I64Op.Eqz ^^
+          E.if1 I64Type
+            get_a (* {0,1}^(1+n) *)
+            begin
+              (* Exponents >= 64 with |base| >= 2 always overflow. *)
+              get_b ^^ compile_unboxed_const 64L ^^
+              compile_comparison I64Op.GeU ^^ then_arithmetic_overflow env ^^
+              (* -(significant bits of a) * b >= -63  ==>  result fits. *)
+              get_a ^^ get_a ^^ compile_shl_const 1L ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.Xor)) ^^
+              G.i (Unary (Wasm_exts.Values.I64 I64Op.Clz)) ^^ compile_sub_const 63L ^^
+              get_b ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.Mul)) ^^
+              compile_unboxed_const (-63L) ^^ compile_comparison I64Op.GeS ^^
+              E.if1 I64Type
+                (get_a ^^ get_b ^^ fast)
+                slow
+            end
+        end
+    end
+
+
+(* kernel for Int64 arithmetic, invokes estimator for fast path *)
+(* kernel for Int64 arithmetic, invokes estimator for fast path *)
+(* The slow path promotes both operands to bignums, performs the operation,
+   traps unless the result fits in 64 signed bits, and truncates back. *)
+let compile_Int64_kernel env name op shortcut =
+  Func.share_code2 Func.Always env (prim_fun_name Type.Int64 name)
+    (("a", I64Type), ("b", I64Type)) [I64Type]
+    BigNum.(fun env get_a get_b ->
+    shortcut
+      env
+      get_a
+      get_b
+      begin
+        let (set_res, get_res) = new_local env "res" in
+        get_a ^^ from_signed_word64 env ^^
+        get_b ^^ from_signed_word64 env ^^
+        op env ^^
+        set_res ^^ get_res ^^
+        fits_signed_bits env 64 ^^
+        else_arithmetic_overflow env ^^
+        get_res ^^ truncate_to_word64 env
+      end)
+
+
+(* helpers to decide if Nat64 arithmetic can be carried out on the fast path *)
+(* helpers to decide if Nat64 arithmetic can be carried out on the fast path *)
+(* Both operands below 2^62 (top two bits clear) — unsigned add/sub cannot
+   wrap, so take the fast path; otherwise fall back to `slow`. *)
+let additiveNat64_shortcut fast env get_a get_b slow =
+  get_a ^^ compile_shrU_const 62L ^^
+  get_b ^^ compile_shrU_const 62L ^^
+  G.i (Binary (Wasm_exts.Values.I64 I64Op.Or)) ^^
+  compile_test I64Op.Eqz ^^
+  E.if1 I64Type
+    (get_a ^^ get_b ^^ fast)
+    slow
+
+(* If clz(a) + clz(b) >= 64, the unsigned product provably fits in 64 bits,
+   so plain i64 multiplication is safe. *)
+let mulNat64_shortcut fast env get_a get_b slow =
+  get_a ^^ G.i (Unary (Wasm_exts.Values.I64 I64Op.Clz)) ^^
+  get_b ^^ G.i (Unary (Wasm_exts.Values.I64 I64Op.Clz)) ^^
+  G.i (Binary (Wasm_exts.Values.I64 I64Op.Add)) ^^
+  compile_unboxed_const 64L ^^ compile_comparison I64Op.GeU ^^
+  E.if1 I64Type
+    (get_a ^^ get_b ^^ fast)
+    slow
+
+(* Fast-path estimator for Nat64 exponentiation: handle the trivial cases
+   (zero exponent, base 0/1), trap for exponents >= 64 with base >= 2,
+   then check the estimated result width (significant bits of `a` times `b`)
+   fits 64 bits before taking the fast path. *)
+let powNat64_shortcut fast env get_a get_b slow =
+  get_b ^^ compile_test I64Op.Eqz ^^
+  E.if1 I64Type
+    (compile_unboxed_const 1L) (* ^0 *)
+    begin (* ^(1+n) *)
+      get_a ^^ compile_shrU_const 1L ^^
+      compile_test I64Op.Eqz ^^
+      E.if1 I64Type
+        get_a (* {0,1}^(1+n) *)
+        begin
+          get_b ^^ compile_unboxed_const 64L ^^ compile_comparison I64Op.GeU ^^ then_arithmetic_overflow env ^^
+          get_a ^^ G.i (Unary (Wasm_exts.Values.I64 I64Op.Clz)) ^^ compile_sub_const 64L ^^
+          get_b ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.Mul)) ^^ compile_unboxed_const (-64L) ^^ compile_comparison I64Op.GeS ^^
+          E.if1 I64Type
+            (get_a ^^ get_b ^^ fast)
+            slow
+        end
+    end
+
+
+(* kernel for Nat64 arithmetic, invokes estimator for fast path *)
+(* kernel for Nat64 arithmetic, invokes estimator for fast path *)
+(* The slow path promotes both operands to (unsigned) bignums, performs the
+   operation, traps unless the result fits in 64 unsigned bits, and
+   truncates back to a raw word. *)
+let compile_Nat64_kernel env name op shortcut =
+  Func.share_code2 Func.Always env (prim_fun_name Type.Nat64 name)
+    (("a", I64Type), ("b", I64Type)) [I64Type]
+    BigNum.(fun env get_a get_b ->
+    shortcut
+      env
+      get_a
+      get_b
+      begin
+        let (set_res, get_res) = new_local env "res" in
+        get_a ^^ from_word64 env ^^
+        get_b ^^ from_word64 env ^^
+        op env ^^
+        set_res ^^ get_res ^^
+        fits_unsigned_bits env 64 ^^
+        else_arithmetic_overflow env ^^
+        get_res ^^ truncate_to_word64 env
+      end)
+
+
+(* Compiling Int/Nat32 ops by conversion to/from i64. *)
+
+(* helper, expects i64 on stack *)
+(* helper, expects i64 on stack *)
+(* Traps with "arithmetic overflow" unless the upper 32 bits are all zero,
+   i.e. the value fits in 32 unsigned bits. *)
+let enforce_32_unsigned_bits env =
+  compile_bitand_const 0xFFFF_FFFF_0000_0000L ^^
+  compile_test I64Op.Eqz ^^
+  else_arithmetic_overflow env
+
+(* helper, expects two identical i64s on stack *)
+(* `x xor (x shl 1)` has a zero upper half iff bits 31..63 of `x` all equal
+   the sign bit, i.e. the value fits in 32 signed bits. *)
+let enforce_32_signed_bits env =
+  compile_shl_const 1L ^^
+  G.i (Binary (Wasm_exts.Values.I64 I64Op.Xor)) ^^
+  enforce_32_unsigned_bits env
+
+(* TODO: Combine this with `compile_smallInt_kernel`, to support `Int32`, `Int16`, and `Int8` at once. *)
+(* TODO: Combine this with `compile_smallInt_kernel`, to support `Int32`, `Int16`, and `Int8` at once. *)
+(* Int32 values are kept msb-aligned (in bits 32..63). Arithmetic-shift both
+   operands down, operate in i64, trap unless the result fits 32 signed bits,
+   then shift the result back up. *)
+let compile_Int32_kernel env name op =
+     Func.share_code2 Func.Always env (prim_fun_name Type.Int32 name)
+       (("a", I64Type), ("b", I64Type)) [I64Type]
+       (fun env get_a get_b ->
+         let (set_res, get_res) = new_local env "res" in
+         get_a ^^ compile_shrS_const 32L ^^
+         get_b ^^ compile_shrS_const 32L ^^
+         G.i (Binary (Wasm_exts.Values.I64 op)) ^^
+         set_res ^^ get_res ^^ get_res ^^
+         enforce_32_signed_bits env ^^
+         get_res ^^ compile_shl_const 32L)
+
+(* TODO: Combine this with `compile_smallInt_kernel`, to support `Nat32`, `Nat16`, and `Nat8` at once. *)
+(* TODO: Combine this with `compile_smallInt_kernel`, to support `Nat32`, `Nat16`, and `Nat8` at once. *)
+(* Same as `compile_Int32_kernel` but unsigned: logical shifts and an
+   unsigned 32-bit range check. *)
+let compile_Nat32_kernel env name op =
+     Func.share_code2 Func.Always env (prim_fun_name Type.Nat32 name)
+       (("a", I64Type), ("b", I64Type)) [I64Type]
+       (fun env get_a get_b ->
+         let (set_res, get_res) = new_local env "res" in
+         get_a ^^ compile_shrU_const 32L ^^
+         get_b ^^ compile_shrU_const 32L ^^
+         G.i (Binary (Wasm_exts.Values.I64 op)) ^^
+         set_res ^^ get_res ^^
+         enforce_32_unsigned_bits env ^^
+         get_res ^^ compile_shl_const 32L)
+
+(* Customisable kernels for 8/16bit arithmetic via 64 bits. *)
+(* TODO: Include the support for 32bit which is now also compact on 64-bit. 
+   Eventually, `compile_Int32_kernel` and `compile_Nat32_kernel` can be removed. *)
+
+(* helper, expects i64 on stack *)
+(* helper, expects i64 on stack *)
+(* Traps with "arithmetic overflow" unless the value fits in `n` unsigned
+   bits (all bits at position `n` and above must be zero). *)
+let enforce_unsigned_bits env n =
+  compile_bitand_const Int64.(shift_left minus_one n) ^^
+  compile_test I64Op.Eqz ^^
+  else_arithmetic_overflow env
+
+let enforce_16_unsigned_bits env = enforce_unsigned_bits env 16
+
+(* helper, expects two identical i64s on stack *)
+(* `x xor (x shl 1)` folds the sign bit away, so the unsigned check on the
+   result enforces that `x` fits in `n` signed bits. *)
+let enforce_signed_bits env n =
+  compile_shl_const 1L ^^ 
+  G.i (Binary (Wasm_exts.Values.I64 I64Op.Xor)) ^^
+  enforce_unsigned_bits env n
+
+let enforce_16_signed_bits env = enforce_signed_bits env 16
+
+(* 16-bit (and 8-bit) values are kept msb-aligned in bits 48..63.
+   Shift both operands down (arithmetic shift for signed), apply `op` on
+   i64, trap unless the result fits 16 signed bits, and shift back up.
+   `op` is arbitrary code here so callers can customize (e.g. 8-bit mul). *)
+let compile_smallInt_kernel' env ty name op =
+  Func.share_code2 Func.Always env (prim_fun_name ty name)
+    (("a", I64Type), ("b", I64Type)) [I64Type]
+    (fun env get_a get_b ->
+      let (set_res, get_res) = new_local env "res" in
+      get_a ^^ compile_shrS_const 48L ^^
+      get_b ^^ compile_shrS_const 48L ^^
+      op ^^
+      set_res ^^ get_res ^^ get_res ^^
+      enforce_16_signed_bits env ^^
+      get_res ^^ compile_shl_const 48L)
+
+(* Common case: `op` is a single i64 binary instruction. *)
+let compile_smallInt_kernel env ty name op =
+  compile_smallInt_kernel' env ty name (G.i (Binary (Wasm_exts.Values.I64 op)))
+
+(* Unsigned variant: logical shifts and a 16-bit unsigned range check. *)
+let compile_smallNat_kernel' env ty name op =
+  Func.share_code2 Func.Always env (prim_fun_name ty name)
+    (("a", I64Type), ("b", I64Type)) [I64Type]
+    (fun env get_a get_b ->
+      let (set_res, get_res) = new_local env "res" in
+      get_a ^^ compile_shrU_const 48L ^^
+      get_b ^^ compile_shrU_const 48L ^^
+      op ^^
+      set_res ^^ get_res ^^
+      enforce_16_unsigned_bits env ^^
+      get_res ^^ compile_shl_const 48L)
+
+(* Common case: `op` is a single i64 binary instruction. *)
+let compile_smallNat_kernel env ty name op =
+  compile_smallNat_kernel' env ty name (G.i (Binary (Wasm_exts.Values.I64 op)))
+
+(* The first returned StackRep is for the arguments (expected), the second for the results (produced) *)
+let compile_binop env t op : SR.t * SR.t * G.t =
+  if t = Type.Non then SR.Vanilla, SR.Unreachable, G.i Unreachable else
+  StackRep.of_type t,
+  StackRep.of_type t,
+  Operator.(match t, op with
+  | Type.(Prim (Nat | Int)),                  AddOp -> BigNum.compile_add env
+  | Type.(Prim (Nat64|Int64)),                WAddOp -> G.i (Binary (Wasm_exts.Values.I64 I64Op.Add))
+  | Type.(Prim Int64),                        AddOp ->
+    compile_Int64_kernel env "add" BigNum.compile_add
+      (additiveInt64_shortcut (G.i (Binary (Wasm_exts.Values.I64 I64Op.Add))))
+  | Type.(Prim Nat64),                        AddOp ->
+    compile_Nat64_kernel env "add" BigNum.compile_add
+      (additiveNat64_shortcut (G.i (Binary (Wasm_exts.Values.I64 I64Op.Add))))
+  | Type.(Prim Nat),                          SubOp -> BigNum.compile_unsigned_sub env
+  | Type.(Prim Int),                          SubOp -> BigNum.compile_signed_sub env
+  | Type.(Prim (Nat | Int)),                  MulOp -> BigNum.compile_mul env
+  | Type.(Prim (Nat64|Int64)),                WMulOp -> G.i (Binary (Wasm_exts.Values.I64 I64Op.Mul))
+  | Type.(Prim Int64),                        MulOp ->
+    compile_Int64_kernel env "mul" BigNum.compile_mul
+      (mulInt64_shortcut (G.i (Binary (Wasm_exts.Values.I64 I64Op.Mul))))
+  | Type.(Prim Nat64),                        MulOp ->
+    compile_Nat64_kernel env "mul" BigNum.compile_mul
+      (mulNat64_shortcut (G.i (Binary (Wasm_exts.Values.I64 I64Op.Mul))))
+  | Type.(Prim Nat64),                        DivOp -> G.i (Binary (Wasm_exts.Values.I64 I64Op.DivU))
+  | Type.(Prim Nat64) ,                       ModOp -> G.i (Binary (Wasm_exts.Values.I64 I64Op.RemU))
+  | Type.(Prim Int64),                        DivOp -> G.i (Binary (Wasm_exts.Values.I64 I64Op.DivS))
+  | Type.(Prim Int64) ,                       ModOp -> G.i (Binary (Wasm_exts.Values.I64 I64Op.RemS))
+  | Type.(Prim Nat),                          DivOp -> BigNum.compile_unsigned_div env
+  | Type.(Prim Nat),                          ModOp -> BigNum.compile_unsigned_rem env
+  | Type.(Prim (Nat64|Int64)),                WSubOp -> G.i (Binary (Wasm_exts.Values.I64 I64Op.Sub))
+  | Type.(Prim Int64),                        SubOp ->
+    compile_Int64_kernel env "sub" BigNum.compile_signed_sub
+      (additiveInt64_shortcut (G.i (Binary (Wasm_exts.Values.I64 I64Op.Sub))))
+  | Type.(Prim Nat64),                        SubOp ->
+    compile_Nat64_kernel env "sub" BigNum.compile_unsigned_sub
+      (fun env get_a get_b ->
+        additiveNat64_shortcut
+          (compile_comparison I64Op.GeU ^^
+           else_arithmetic_overflow env ^^
+           get_a ^^ get_b ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.Sub)))
+          env get_a get_b)
+  | Type.(Prim Int),                          DivOp -> BigNum.compile_signed_div env
+  | Type.(Prim Int),                          ModOp -> BigNum.compile_signed_mod env
+
+  | Type.Prim Type.(Nat8|Nat16|Nat32|Int8|Int16|Int32),
+                                              WAddOp -> G.i (Binary (Wasm_exts.Values.I64 I64Op.Add))
+  | Type.(Prim Int32),                        AddOp -> compile_Int32_kernel env "add" I64Op.Add
+  | Type.Prim Type.(Int8 | Int16 as ty),      AddOp -> compile_smallInt_kernel env ty "add" I64Op.Add
+  | Type.(Prim Nat32),                        AddOp -> compile_Nat32_kernel env "add" I64Op.Add
+  | Type.Prim Type.(Nat8 | Nat16 as ty),      AddOp -> compile_smallNat_kernel env ty "add" I64Op.Add
+  | Type.(Prim Float),                        AddOp -> G.i (Binary (Wasm_exts.Values.F64 F64Op.Add))
+  | Type.Prim Type.(Nat8|Nat16|Nat32|Int8|Int16|Int32),
+                                              WSubOp -> G.i (Binary (Wasm_exts.Values.I64 I64Op.Sub))
+  | Type.(Prim Int32),                        SubOp -> compile_Int32_kernel env "sub" I64Op.Sub
+  | Type.(Prim (Int8|Int16 as ty)),           SubOp -> compile_smallInt_kernel env ty "sub" I64Op.Sub
+  | Type.(Prim Nat32),                        SubOp -> compile_Nat32_kernel env "sub" I64Op.Sub
+  | Type.(Prim (Nat8|Nat16 as ty)),           SubOp -> compile_smallNat_kernel env ty "sub" I64Op.Sub
+  | Type.(Prim Float),                        SubOp -> G.i (Binary (Wasm_exts.Values.F64 F64Op.Sub))
+  | Type.Prim Type.(Nat8|Nat16|Nat32|Int8|Int16|Int32 as ty),
+                                              WMulOp -> TaggedSmallWord.compile_word_mul env ty
+  | Type.(Prim Int32),                        MulOp -> compile_Int32_kernel env "mul" I64Op.Mul
+  | Type.(Prim Int16),                        MulOp -> compile_smallInt_kernel env Type.Int16 "mul" I64Op.Mul
+  | Type.(Prim Int8),                         MulOp -> compile_smallInt_kernel' env Type.Int8 "mul"
+                                                         (compile_shrS_const 8L ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.Mul)))
+  | Type.(Prim Nat32),                        MulOp -> compile_Nat32_kernel env "mul" I64Op.Mul
+  | Type.(Prim Nat16),                        MulOp -> compile_smallNat_kernel env Type.Nat16 "mul" I64Op.Mul
+  | Type.(Prim Nat8),                         MulOp -> compile_smallNat_kernel' env Type.Nat8 "mul"
+                                                         (compile_shrU_const 8L ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.Mul)))
+  | Type.(Prim Float),                        MulOp -> G.i (Binary (Wasm_exts.Values.F64 F64Op.Mul))
+  | Type.(Prim (Nat8|Nat16|Nat32 as ty)),     DivOp -> G.i (Binary (Wasm_exts.Values.I64 I64Op.DivU)) ^^
+                                                       TaggedSmallWord.msb_adjust ty
+  | Type.(Prim (Nat8|Nat16|Nat32)),           ModOp -> G.i (Binary (Wasm_exts.Values.I64 I64Op.RemU))
+  | Type.(Prim (Int8|Int16|Int32 as ty)),           DivOp ->
+    Func.share_code2 Func.Always env (prim_fun_name ty "div")
+      (("a", I64Type), ("b", I64Type)) [I64Type]
+      (fun env get_a get_b ->
+        let (set_res, get_res) = new_local env "res" in
+        get_a ^^ get_b ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.DivS)) ^^
+        TaggedSmallWord.msb_adjust ty ^^ set_res ^^
+        get_a ^^ compile_eq_const 0x8000_0000_0000_0000L ^^
+        E.if_ env [I64Type]
+          begin
+            get_b ^^ TaggedSmallWord.lsb_adjust ty ^^ compile_eq_const (-1L) ^^
+            E.if_ env [I64Type]
+              (G.i Unreachable)
+              get_res
+          end
+          get_res)
+  | Type.(Prim Float),                        DivOp -> G.i (Binary (Wasm_exts.Values.F64 F64Op.Div))
+  | Type.(Prim Float),                        ModOp -> E.call_import env "rts" "fmod" (* musl *)
+  | Type.(Prim (Int8|Int16|Int32)),           ModOp -> G.i (Binary (Wasm_exts.Values.I64 I64Op.RemS))
+  | Type.(Prim (Nat8|Nat16|Nat32 as ty)),     WPowOp -> TaggedSmallWord.compile_nat_power env ty
+  | Type.(Prim (Int8|Int16|Int32 as ty)),     WPowOp -> TaggedSmallWord.compile_int_power env ty
+  | Type.(Prim ((Nat8|Nat16|Nat32) as ty)),         PowOp ->
+    Func.share_code2 Func.Always env (prim_fun_name ty "pow")
+      (("n", I64Type), ("exp", I64Type)) [I64Type]
+      (fun env get_n get_exp ->
+        let (set_res, get_res) = new_local env "res" in
+        let bits = TaggedSmallWord.bits_of_type ty in
+        let set_n = G.setter_for get_n in
+        let set_exp = G.setter_for get_exp in
+        get_n ^^ TaggedSmallWord.lsb_adjust ty ^^ set_n ^^
+        get_exp ^^ TaggedSmallWord.lsb_adjust ty ^^ set_exp ^^
+        get_exp ^^ Bool.from_int64 ^^
+        E.if1 I64Type
+          begin
+            get_n ^^ compile_shrU_const 1L ^^
+            Bool.from_int64 ^^
+            E.if1 I64Type
+              begin
+                let overflow_type = match ty with
+                | Type.Nat32 -> Type.Nat64 
+                | Type.(Nat8 | Nat16) -> Type.Nat32 
+                | _ -> assert false in
+                let overflow_type_bits = TaggedSmallWord.bits_of_type overflow_type in
+                let overflow_boundary = -Int.(sub (mul overflow_type_bits 2) 2) in
+                get_exp ^^ compile_unboxed_const 64L ^^
+                compile_comparison I64Op.GeU ^^ then_arithmetic_overflow env ^^
+                unsigned_dynamics get_n ^^ compile_sub_const (Int64.of_int bits) ^^
+                get_exp ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.Mul)) ^^
+                compile_unboxed_const (Int64.of_int overflow_boundary) ^^
+                compile_comparison I64Op.LtS ^^ then_arithmetic_overflow env ^^
+                get_n ^^ get_exp ^^
+                TaggedSmallWord.compile_nat_power env Type.Nat64 ^^ set_res ^^
+                get_res ^^ enforce_unsigned_bits env bits ^^
+                get_res ^^ TaggedSmallWord.msb_adjust ty
+              end
+              (get_n ^^ TaggedSmallWord.msb_adjust ty) (* n@{0,1} ** (1+exp) == n *)
+          end
+          (compile_unboxed_one ^^ TaggedSmallWord.msb_adjust ty)) (* x ** 0 == 1 *)
+  | Type.(Prim ((Int8|Int16|Int32) as ty)),         PowOp ->
+    Func.share_code2 Func.Always env (prim_fun_name ty "pow")
+      (("n", I64Type), ("exp", I64Type)) [I64Type]
+      (fun env get_n get_exp ->
+        let (set_res, get_res) = new_local env "res" in
+        let bits = TaggedSmallWord.bits_of_type ty in
+        let set_n = G.setter_for get_n in
+        let set_exp = G.setter_for get_exp in
+        get_n ^^ TaggedSmallWord.lsb_adjust ty ^^ set_n ^^
+        get_exp ^^ TaggedSmallWord.lsb_adjust ty ^^ set_exp ^^
+        get_exp ^^ compile_unboxed_zero ^^
+        compile_comparison I64Op.LtS ^^ E.then_trap_with env "negative power" ^^
+        get_exp ^^ Bool.from_int64 ^^
+        E.if1 I64Type
+          begin
+            get_n ^^ compile_unboxed_one ^^ compile_comparison I64Op.LeS ^^
+            get_n ^^ compile_unboxed_const (-1L) ^^ compile_comparison I64Op.GeS ^^
+            G.i (Binary (Wasm_exts.Values.I64 I64Op.And)) ^^
+            E.if1 I64Type
+              begin
+                get_n ^^ compile_unboxed_zero ^^ compile_comparison I64Op.LtS ^^
+                E.if1 I64Type
+                  begin
+                    (* -1 ** (1+exp) == if even (1+exp) then 1 else -1 *)
+                    get_exp ^^ compile_unboxed_one ^^ G.i (Binary (Wasm_exts.Values.I64 I64Op.And)) ^^
+                    E.if1 I64Type
+                      (get_n ^^ TaggedSmallWord.msb_adjust ty)
+                      (compile_unboxed_const
+                        Int64.(shift_left one (to_int (TaggedSmallWord.shift_of_type ty))))
+                  end
+                  (get_n ^^ TaggedSmallWord.msb_adjust ty) (* n@{0,1} ** (1+exp) == n *)
+              end
+              begin
+                let overflow_type = match ty with
+                | Type.Int32 -> Type.Int64 
+                | Type.(Int8 | Int16) -> Type.Int32 
+                | _ -> assert false in
+                let overflow_type_bits = TaggedSmallWord.bits_of_type overflow_type in
+                let overflow_boundary = -Int.(sub (mul overflow_type_bits 2) 2) in
+                get_exp ^^ compile_unboxed_const 64L ^^
+                compile_comparison I64Op.GeU ^^ then_arithmetic_overflow env ^^
+                signed_dynamics get_n ^^ compile_sub_const (Int64.of_int (Int.sub bits 1)) ^^
+                get_exp ^^
+                G.i (Binary (Wasm_exts.Values.I64 I64Op.Mul)) ^^
+                compile_unboxed_const (Int64.of_int overflow_boundary) ^^
+                compile_comparison I64Op.LtS ^^ then_arithmetic_overflow env ^^
+                get_n ^^ get_exp ^^
+                TaggedSmallWord.compile_nat_power env Type.Nat64 ^^ set_res ^^ 
+                get_res ^^ get_res ^^ enforce_signed_bits env bits ^^
+                get_res ^^ TaggedSmallWord.msb_adjust ty
+              end
+          end
+          (compile_unboxed_one ^^ TaggedSmallWord.msb_adjust ty)) (* x ** 0 == 1 *)
+  | Type.(Prim Int),                          PowOp ->
+    let pow = BigNum.compile_unsigned_pow env in
+    let (set_n, get_n) = new_local env "n" in
+    let (set_exp, get_exp) = new_local env "exp" in
+    set_exp ^^ set_n ^^
+    get_exp ^^ BigNum.compile_is_negative env ^^
+    E.then_trap_with env "negative power" ^^
+    get_n ^^ get_exp ^^ pow
+  | Type.(Prim Nat64),                        WPowOp -> Word64.compile_unsigned_pow env
+  | Type.(Prim Int64),                        WPowOp -> Word64.compile_signed_wpow env
+  | Type.(Prim Nat64),                        PowOp ->
+    compile_Nat64_kernel env "pow"
+      BigNum.compile_unsigned_pow
+      (powNat64_shortcut (Word64.compile_unsigned_pow env))
+  | Type.(Prim Int64),                        PowOp ->
+    let (set_exp, get_exp) = new_local env "exp" in
+    set_exp ^^ get_exp ^^
+    compile_unboxed_const 0L ^^
+    compile_comparison I64Op.LtS ^^
+    E.then_trap_with env "negative power" ^^
+    get_exp ^^
+    compile_Int64_kernel
+      env "pow" BigNum.compile_unsigned_pow
+      (powInt64_shortcut (Word64.compile_unsigned_pow env))
+  | Type.(Prim Nat),                          PowOp -> BigNum.compile_unsigned_pow env
+  | Type.(Prim Float),                        PowOp -> E.call_import env "rts" "pow" (* musl *)
+  | Type.(Prim (Nat8|Nat16|Nat32|Nat64|Int8|Int16|Int32|Int64)),
+                                              AndOp -> G.i (Binary (Wasm_exts.Values.I64 I64Op.And))
+  | Type.(Prim (Nat8|Nat16|Nat32|Nat64|Int8|Int16|Int32|Int64)),
+                                              OrOp  -> G.i (Binary (Wasm_exts.Values.I64 I64Op.Or))
+  | Type.(Prim (Nat8|Nat16|Nat32|Nat64|Int8|Int16|Int32|Int64)),
+                                              XorOp -> G.i (Binary (Wasm_exts.Values.I64 I64Op.Xor))
+  | Type.(Prim (Nat64|Int64)),                ShLOp -> G.i (Binary (Wasm_exts.Values.I64 I64Op.Shl))
+  | Type.(Prim (Nat8|Nat16|Nat32|Int8|Int16|Int32 as ty)),
+                                              ShLOp -> TaggedSmallWord.(
+     lsb_adjust ty ^^ clamp_shift_amount ty ^^
+     G.i (Binary (Wasm_exts.Values.I64 I64Op.Shl)))
+  | Type.(Prim Nat64),                        ShROp -> G.i (Binary (Wasm_exts.Values.I64 I64Op.ShrU))
+  | Type.(Prim (Nat8|Nat16|Nat32 as ty)),     ShROp -> TaggedSmallWord.(
+     lsb_adjust ty ^^ clamp_shift_amount ty ^^
+     G.i (Binary (Wasm_exts.Values.I64 I64Op.ShrU)) ^^
+     sanitize_word_result ty)
+  | Type.(Prim Int64),                        ShROp -> G.i (Binary (Wasm_exts.Values.I64 I64Op.ShrS))
+  | Type.(Prim (Int8|Int16|Int32 as ty)),     ShROp -> TaggedSmallWord.(
+     lsb_adjust ty ^^ clamp_shift_amount ty ^^
+     G.i (Binary (Wasm_exts.Values.I64 I64Op.ShrS)) ^^
+     sanitize_word_result ty)
+  | Type.(Prim (Nat64|Int64)),                RotLOp -> G.i (Binary (Wasm_exts.Values.I64 I64Op.Rotl))
+  | Type.(Prim (Nat8|Nat16|Nat32|Int8|Int16|Int32 as ty)),
+                                              RotLOp -> TaggedSmallWord.rotl env ty
+  | Type.(Prim (Nat64|Int64)),                RotROp -> G.i (Binary (Wasm_exts.Values.I64 I64Op.Rotr))
+  | Type.(Prim (Nat8|Nat16|Nat32|Int8|Int16|Int32 as ty)),
+                                              RotROp -> TaggedSmallWord.rotr env ty
+  | Type.(Prim Text), CatOp -> Text.concat env
+  | Type.Non, _ -> G.i Unreachable
+  | _ -> todo_trap env "compile_binop" (Wasm.Sexpr.Node ("BinOp", [ Arrange_ops.binop op; Arrange_type.typ t]))
+  )
+
+(* Compile an equality test at type [ty], dispatching to the
+   representation-specific comparison (text, blob/actor, shared function,
+   bignum, word-sized scalar, or float). *)
+let compile_eq env ty =
+  let open Type in
+  match ty with
+  | Prim Text -> Text.compare env Operator.EqOp
+  | Prim (Blob | Principal) | Obj (Actor, _) -> Blob.compare env (Some Operator.EqOp)
+  | Func (Shared _, _, _, _, _) -> FuncDec.equate_msgref env
+  | Prim (Nat | Int) -> BigNum.compile_eq env
+  | Prim (Bool | Int8 | Nat8 | Int16 | Nat16 | Int32 | Nat32 | Int64 | Nat64 | Char) ->
+    compile_comparison I64Op.Eq
+  | Prim Float -> compile_comparison_f64 F64Op.Eq
+  | Non -> G.i Unreachable
+  | t -> todo_trap env "compile_eq" (Arrange_type.typ t)
+
+(* Map a relational operator to the triple
+   (bignum relop, unsigned i64 relop, signed i64 relop).
+   NeqOp must not reach this point (asserted). *)
+let get_relops op =
+  let open Operator in
+  match op with
+  | GeOp -> Ge, I64Op.GeU, I64Op.GeS
+  | GtOp -> Gt, I64Op.GtU, I64Op.GtS
+  | LeOp -> Le, I64Op.LeU, I64Op.LeS
+  | LtOp -> Lt, I64Op.LtU, I64Op.LtS
+  | NeqOp -> assert false
+  | _ -> failwith "uncovered relop"
+
+(* Emit the comparison for primitive type [t] under relational operator [op]:
+   bignum relop for Nat/Int, unsigned i64 relop for Nat8..Nat64 and Char,
+   signed i64 relop for Int8..Int64. *)
+let compile_comparison_op env t op =
+  let big_relop, unsigned_relop, signed_relop = get_relops op in
+  Type.(match t with
+  | Nat | Int -> BigNum.compile_relop env big_relop
+  | Nat8 | Nat16 | Nat32 | Nat64 | Char -> compile_comparison unsigned_relop
+  | Int8 | Int16 | Int32 | Int64 -> compile_comparison signed_relop
+  | _ -> todo_trap env "compile_comparison" (Arrange_type.prim t))
+
+(* Compile relational operator [op] at type [t]. Returns the stack
+   representation both operands must have, paired with the code that
+   performs the comparison. *)
+let compile_relop env t op =
+  if t = Type.Non then (SR.Vanilla, G.i Unreachable)
+  else
+    StackRep.of_type t,
+    let open Operator in
+    match t, op with
+    | Type.(Prim Text), _ -> Text.compare env op
+    | Type.(Prim (Blob | Principal)), _ -> Blob.compare env (Some op)
+    | _, EqOp -> compile_eq env t
+    | Type.(Prim ((Nat | Nat8 | Nat16 | Nat32 | Nat64
+                  | Int | Int8 | Int16 | Int32 | Int64 | Char) as pty)), relop ->
+      compile_comparison_op env pty relop
+    | Type.(Prim Float), GtOp -> compile_comparison_f64 F64Op.Gt
+    | Type.(Prim Float), GeOp -> compile_comparison_f64 F64Op.Ge
+    | Type.(Prim Float), LeOp -> compile_comparison_f64 F64Op.Le
+    | Type.(Prim Float), LtOp -> compile_comparison_f64 F64Op.Lt
+    | _ -> todo_trap env "compile_relop" (Arrange_ops.relop op)
+
+(* Read field [name] from an object value of type [typ];
+   thin wrapper over Object.load_idx. *)
+let compile_load_field env typ name =
+  Object.load_idx env typ name
+
+
+(* compile_lexp compiles expressions appearing on the left of an assignment.
+   It yields a triple of
+   * preparation code, to run first,
+   * the stack representation the written value must arrive in,
+   * code that consumes the value in that representation and stores it.
+*)
+let rec compile_lexp (env : E.t) ae lexp : G.t * SR.t * G.t =
+  (fun (prep, sr, store) ->
+    G.(with_region lexp.at prep, sr, with_region lexp.at store)) @@
+  match lexp.it with
+  | VarLE var -> Var.set_val env ae var
+  | IdxLE (earr, eidx) when potential_pointer (Arr.element_type env earr.note.Note.typ) ->
+    (* element may hold a pointer: unskew and go through the write barrier *)
+    compile_array_index env ae earr eidx ^^
+    compile_add_const ptr_unskew,
+    SR.Vanilla,
+    Tagged.write_with_barrier env
+  | IdxLE (earr, eidx) ->
+    (* element type never needs the barrier: plain store *)
+    compile_array_index env ae earr eidx,
+    SR.Vanilla,
+    store_ptr
+  | DotLE (eobj, n) when potential_pointer (Object.field_type env eobj.note.Note.typ n) ->
+    compile_exp_vanilla env ae eobj ^^
+    (* Only real objects have mutable fields, no need to branch on the tag *)
+    Object.idx env eobj.note.Note.typ n ^^
+    compile_add_const ptr_unskew,
+    SR.Vanilla,
+    Tagged.write_with_barrier env
+  | DotLE (eobj, n) ->
+    compile_exp_vanilla env ae eobj ^^
+    (* Only real objects have mutable fields, no need to branch on the tag *)
+    Object.idx env eobj.note.Note.typ n,
+    SR.Vanilla,
+    store_ptr
+
+(* Common code for a[i] in both l-value and r-value position.
+   Traps or pushes the pointer to the selected element on the stack. *)
+and compile_array_index env ae earr eidx =
+  compile_exp_vanilla env ae earr ^^ (* the array value *)
+  compile_exp_vanilla env ae eidx ^^ (* the index, as a bignum *)
+  Arr.idx_bigint env
+
+and compile_prim_invocation (env : E.t) ae p es at =
+  (* for more concise code when all arguments and result use the same sr *)
+  let const_sr sr inst = sr, G.concat_map (compile_exp_as env ae sr) es ^^ inst in
+
+  begin match p, es with
+  (* Calls *)
+  | CallPrim _, [e1; e2] ->
+    let sort, control, _, arg_tys, ret_tys = Type.(as_func (promote e1.note.Note.typ)) in
+    let n_args = List.length arg_tys in
+    let return_arity = match control with
+      | Type.Returns -> List.length ret_tys
+      | Type.Replies -> 0
+      | Type.Promises -> assert false in
+
+    let fun_sr, code1 = compile_exp env ae e1 in
+
+    (* we duplicate this pattern match to emulate pattern guards *)
+    let call_as_prim = match fun_sr, sort with
+      | SR.Const Const.Fun (_, mk_fi, Const.PrimWrapper prim), _ ->
+         begin match n_args, e2.it with
+         | 0, _ -> true
+         | 1, _ -> true
+         | n, PrimE (TupPrim, es) when List.length es = n -> true
+         | _, _ -> false
+         end
+      | _ -> false in
+
+    begin match fun_sr, sort with
+      | SR.Const Const.Fun (_, mk_fi, Const.PrimWrapper prim), _ when call_as_prim ->
+         assert (sort = Type.Local);
+         (* Handle argument tuples *)
+         begin match n_args, e2.it with
+         | 0, _ ->
+           let sr, code2 = compile_prim_invocation env ae prim [] at in
+           sr,
+           code1 ^^
+           compile_exp_as env ae (StackRep.of_arity 0) e2 ^^
+           code2
+         | 1, _ ->
+           compile_prim_invocation env ae prim [e2] at
+         | n, PrimE (TupPrim, es) ->
+           assert (List.length es = n);
+           compile_prim_invocation env ae prim es at
+         | _, _ ->
+           (* ugly case; let's just call this as a function for now *)
+           raise (Invalid_argument "call_as_prim was true?")
+         end
+      | SR.Const Const.Fun (_, mk_fi, _), _ ->
+         assert (sort = Type.Local);
+         StackRep.of_arity return_arity,
+
+         code1 ^^
+         compile_unboxed_zero ^^ (* A dummy closure *)
+         compile_exp_as env ae (StackRep.of_arity n_args) e2 ^^ (* the args *)
+         G.i (Call (nr (mk_fi()))) ^^
+         FakeMultiVal.load env (Lib.List.make return_arity I64Type)
+      | _, Type.Local ->
+         let (set_clos, get_clos) = new_local env "clos" in
+
+         StackRep.of_arity return_arity,
+         code1 ^^ StackRep.adjust env fun_sr SR.Vanilla ^^
+         set_clos ^^
+         get_clos ^^
+         Closure.prepare_closure_call env ^^
+         compile_exp_as env ae (StackRep.of_arity n_args) e2 ^^
+         get_clos ^^
+         Closure.call_closure env n_args return_arity
+      | _, Type.Shared _ ->
+         (* Non-one-shot functions have been rewritten in async.ml *)
+         assert (control = Type.Returns);
+
+         let (set_meth_pair, get_meth_pair) = new_local env "meth_pair" in
+         let (set_arg, get_arg) = new_local env "arg" in
+         let _, _, _, ts, _ = Type.as_func e1.note.Note.typ in
+         let add_cycles = Internals.add_cycles env ae in
+
+         StackRep.of_arity return_arity,
+         code1 ^^ StackRep.adjust env fun_sr SR.Vanilla ^^
+         set_meth_pair ^^
+         compile_exp_vanilla env ae e2 ^^ set_arg ^^
+
+         FuncDec.ic_call_one_shot env ts get_meth_pair get_arg add_cycles
+    end
+
+  (* Operators *)
+  | UnPrim (_, Operator.PosOp), [e1] -> compile_exp env ae e1
+  | UnPrim (t, op), [e1] ->
+    let sr_in, sr_out, code = compile_unop env t op in
+    sr_out,
+    compile_exp_as env ae sr_in e1 ^^
+    code
+  | BinPrim (t, op), [e1;e2] ->
+    let sr_in, sr_out, code = compile_binop env t op in
+    sr_out,
+    compile_exp_as env ae sr_in e1 ^^
+    compile_exp_as env ae sr_in e2 ^^
+    code
+  (* special case: recognize negation *)
+  | RelPrim (Type.(Prim Bool), Operator.EqOp), [e1; {it = LitE (BoolLit false); _}] ->
+    SR.bool,
+    compile_exp_as_test env ae e1 ^^
+    compile_test I64Op.Eqz
+  | RelPrim (t, op), [e1;e2] ->
+    let sr, code = compile_relop env t op in
+    SR.bool,
+    compile_exp_as env ae sr e1 ^^
+    compile_exp_as env ae sr e2 ^^
+    code
+
+  (* Tuples *)
+  | TupPrim, es ->
+    SR.UnboxedTuple (List.length es),
+    G.concat_map (compile_exp_vanilla env ae) es
+  | ProjPrim n, [e1] ->
+    SR.Vanilla,
+    compile_exp_vanilla env ae e1 ^^ (* offset to tuple (an array) *)
+    Tuple.load_n env (Int64.of_int n)
+
+  | OptPrim, [e] ->
+    SR.Vanilla,
+    Opt.inject env (compile_exp_vanilla env ae e)
+  | TagPrim l, [e] ->
+    SR.Vanilla,
+    Variant.inject env l (compile_exp_vanilla env ae e)
+
+  | DotPrim name, [e] ->
+    let sr, code1 = compile_exp env ae e in
+    begin match sr with
+    | SR.Const Const.Obj fs ->
+      let c = List.assoc name fs in
+      SR.Const c, code1
+    | _ ->
+      SR.Vanilla,
+      code1 ^^ StackRep.adjust env sr SR.Vanilla ^^
+      Object.load_idx env e.note.Note.typ name
+    end
+  | ActorDotPrim name, [e] ->
+    SR.Vanilla,
+    compile_exp_vanilla env ae e ^^
+    IC.actor_public_field env name
+
+  | ArrayPrim (m, t), es ->
+    SR.Vanilla,
+    Arr.lit env Tagged.(if m = Ir.Var then M else I) (List.map (compile_exp_vanilla env ae) es)
+  | IdxPrim, [e1; e2] ->
+    SR.Vanilla,
+    compile_array_index env ae e1 e2 ^^
+    load_ptr
+  (* NB: all these operations assume a valid array offset fits in a compact bignum *)
+  | NextArrayOffset, [e] ->
+    let one_untagged = Int64.shift_left 1L (64 - BitTagged.ubits_of Type.Int) in
+    SR.Vanilla,
+    compile_exp_vanilla env ae e ^^ (* previous byte offset to array *)
+    compile_add_const one_untagged (* preserving the tag in low bits *)
+  | EqArrayOffset, [e1; e2] ->
+    SR.bool,
+    compile_exp_vanilla env ae e1 ^^
+    BitTagged.sanity_check_tag __LINE__ env Type.Int ^^
+    compile_exp_vanilla env ae e2 ^^
+    BitTagged.sanity_check_tag __LINE__ env Type.Int ^^
+    (* equate (without untagging) *)
+    compile_comparison I64Op.Eq
+  | DerefArrayOffset, [e1; e2] ->
+    SR.Vanilla,
+    (* NB: no bounds check on index *)
+    compile_exp_vanilla env ae e1 ^^ (* skewed pointer to array *)
+    Tagged.load_forwarding_pointer env ^^
+    compile_exp_vanilla env ae e2 ^^ (* byte offset *)
+    BitTagged.untag __LINE__ env Type.Int ^^
+    (* TODO: Refactor 3L to use word_size *)
+    compile_shl_const 3L ^^ (* effectively a multiplication by word_size *)
+    (* Note: the below two lines compile to `i64.add; i64.load offset=OFFSET`
+       with `OFFSET = Arr.header_size * word_size + ptr_unskew`,
+       thus together also unskewing the pointer and skipping administrative
+       fields, effectively arriving at the desired element *)
+    G.i (Binary (Wasm_exts.Values.I64 I64Op.Add)) ^^
+    (* Not using Tagged.load_field since it is not a proper pointer to the array start *)
+    Heap.load_field Arr.header_size (* loads the element at the byte offset *)
+  | GetLastArrayOffset, [e] ->
+    assert (BitTagged.can_tag_const Type.Int (Int64.sub Arr.max_array_size 1L));
+    SR.Vanilla,
+    compile_exp_vanilla env ae e ^^ (* array *)
+    Arr.len env ^^
+    compile_sub_const 1L ^^
+    BigNum.from_signed_word_compact env
+
+  | BreakPrim name, [e] ->
+    let d = VarEnv.get_label_depth ae name in
+    SR.Unreachable,
+    compile_exp_vanilla env ae e ^^
+    G.branch_to_ d
+  | AssertPrim, [e1] ->
+    SR.unit,
+    compile_exp_as env ae SR.bool e1 ^^
+    E.if0 G.nop (IC.fail_assert env at)
+  | RetPrim, [e] ->
+    SR.Unreachable,
+    compile_exp_as env ae (StackRep.of_arity (E.get_return_arity env)) e ^^
+    FakeMultiVal.store env (Lib.List.make (E.get_return_arity env) I64Type) ^^
+    G.i Return
+
+  (* Numeric conversions *)
+  | NumConvWrapPrim (t1, t2), [e] -> begin
+    let open Type in
+    match t1, t2 with
+    | (Nat|Int), (Nat8|Nat16|Nat32|Int8|Int16|Int32) ->
+      SR.UnboxedWord64 t2, (* ! *)
+      compile_exp_vanilla env ae e ^^
+      Prim.prim_intToWordNShifted env (TaggedSmallWord.shift_of_type t2)
+
+    | (Nat|Int), ((Nat64|Int64) as p) ->
+      SR.UnboxedWord64 p,
+      compile_exp_vanilla env ae e ^^
+      BigNum.truncate_to_word64 env
+
+    | Nat64, Int64 | Int64, Nat64 ->
+      SR.UnboxedWord64 t2,
+      compile_exp_as env ae (SR.UnboxedWord64 t1) e
+    | Nat32, Int32 | Int32, Nat32 ->
+      SR.UnboxedWord64 t2,
+      compile_exp_as env ae (SR.UnboxedWord64 t1) e
+    | Nat16, Int16 | Int16, Nat16
+    | Nat8, Int8 | Int8, Nat8 ->
+      SR.UnboxedWord64 t2,
+      compile_exp_as env ae (SR.UnboxedWord64 t1) e
+    | Char, Nat32 ->
+      SR.UnboxedWord64 Nat32,
+      compile_exp_as env ae (SR.UnboxedWord64 t1) e ^^
+      TaggedSmallWord.lsb_adjust_codepoint env ^^
+      TaggedSmallWord.msb_adjust Nat32
+
+    | _ -> SR.Unreachable, todo_trap env "compile_prim_invocation" (Arrange_ir.prim p)
+    end
+
+  | NumConvTrapPrim (t1, t2), [e] -> begin
+    let open Type in
+    match t1, t2 with
+
+    | Int, Int64 ->
+      SR.UnboxedWord64 Int64,
+      compile_exp_vanilla env ae e ^^
+      Func.share_code1 Func.Never env "Int->Int64" ("n", I64Type) [I64Type] (fun env get_n ->
+        get_n ^^
+        BigNum.fits_signed_bits env 64 ^^
+        E.else_trap_with env "losing precision" ^^
+        get_n ^^
+        BigNum.truncate_to_word64 env)
+
+    | Int, (Int8|Int16|Int32 as pty) ->
+      StackRep.of_type (Prim pty),
+      compile_exp_vanilla env ae e ^^
+      Func.share_code1 Func.Never env (prim_fun_name pty "Int->") ("n", I64Type) [I64Type] (fun env get_n ->
+        get_n ^^
+        BigNum.fits_signed_bits env (TaggedSmallWord.bits_of_type pty) ^^
+        E.else_trap_with env "losing precision" ^^
+        get_n ^^
+        BigNum.truncate_to_word32 env ^^
+        TaggedSmallWord.msb_adjust pty)
+
+    | Nat, Nat64 ->
+      SR.UnboxedWord64 Nat64,
+      compile_exp_vanilla env ae e ^^
+      Func.share_code1 Func.Never env "Nat->Nat64" ("n", I64Type) [I64Type] (fun env get_n ->
+        get_n ^^
+        BigNum.fits_unsigned_bits env 64 ^^
+        E.else_trap_with env "losing precision" ^^
+        get_n ^^
+        BigNum.truncate_to_word64 env)
+
+    | Nat, (Nat8|Nat16|Nat32 as pty) ->
+      StackRep.of_type (Prim pty),
+      compile_exp_vanilla env ae e ^^
+      Func.share_code1 Func.Never env (prim_fun_name pty "Nat->") ("n", I64Type) [I64Type] (fun env get_n ->
+        get_n ^^
+        BigNum.fits_unsigned_bits env (TaggedSmallWord.bits_of_type pty) ^^
+        E.else_trap_with env "losing precision" ^^
+        get_n ^^
+        BigNum.truncate_to_word32 env ^^
+        TaggedSmallWord.msb_adjust pty)
+
+    | (Nat8|Nat16|Nat32), Nat ->
+      SR.Vanilla,
+      compile_exp_as env ae (SR.UnboxedWord64 t1) e ^^
+      Prim.prim_shiftWordNtoUnsigned env (TaggedSmallWord.shift_of_type t1)
+
+    | (Int8|Int16|Int32), Int ->
+      SR.Vanilla,
+      compile_exp_as env ae (SR.UnboxedWord64 t1) e ^^
+      Prim.prim_shiftWordNtoSigned env (TaggedSmallWord.shift_of_type t1)
+
+    | Nat64, Nat ->
+      SR.Vanilla,
+      compile_exp_as env ae (SR.UnboxedWord64 Nat64) e ^^
+      BigNum.from_word64 env
+
+    | Int64, Int ->
+      SR.Vanilla,
+      compile_exp_as env ae (SR.UnboxedWord64 Int64) e ^^
+      BigNum.from_signed_word64 env
+
+    | Nat32, Char ->
+      SR.UnboxedWord64 Type.Char, (* ! *)
+      compile_exp_as env ae (SR.UnboxedWord64 Nat32) e ^^
+      TaggedSmallWord.lsb_adjust Nat32 ^^
+      TaggedSmallWord.check_and_msb_adjust_codepoint env (* TBR *)
+
+    | Float, Int ->
+      SR.Vanilla,
+      compile_exp_as env ae SR.UnboxedFloat64 e ^^
+      E.call_import env "rts" "bigint_of_float64"
+
+    | Int, Float ->
+      SR.UnboxedFloat64,
+      compile_exp_vanilla env ae e ^^
+      let set_b, get_b = new_local env "b" in
+      set_b ^^
+      get_b ^^
+      BitTagged.if_tagged_scalar env [F64Type]
+        (get_b ^^
+         BitTagged.untag __LINE__ env Type.Int ^^
+         G.i (Convert (Wasm_exts.Values.F64 F64Op.ConvertSI64)))
+        (get_b ^^
+         E.call_import env "rts" "bigint_to_float64")
+
+    | Float, Int64 ->
+      SR.UnboxedWord64 Int64,
+      compile_exp_as env ae SR.UnboxedFloat64 e ^^
+      G.i (Convert (Wasm_exts.Values.I64 I64Op.TruncSF64))
+
+    | Int64, Float ->
+      SR.UnboxedFloat64,
+      compile_exp_as env ae (SR.UnboxedWord64 Int64) e ^^
+      G.i (Convert (Wasm_exts.Values.F64 F64Op.ConvertSI64))
+    | (Nat8 as from_typ), (Nat16 as to_typ)
+    | (Nat16 as from_typ), (Nat32 as to_typ)
+    | (Nat32 as from_typ), (Nat64 as to_typ) ->
+      SR.UnboxedWord64 to_typ,
+      compile_exp_as env ae (SR.UnboxedWord64 from_typ) e ^^
+      TaggedSmallWord.lsb_adjust from_typ ^^
+      TaggedSmallWord.msb_adjust to_typ
+    | (Nat16 as from_typ), (Nat8 as to_typ)
+    | (Nat32 as from_typ), (Nat16 as to_typ)
+    | (Nat64 as from_typ), (Nat32 as to_typ) ->
+      SR.UnboxedWord64 to_typ,
+      let num_bits = (TaggedSmallWord.bits_of_type to_typ) in
+      let set_val, get_val = new_local env "convertee" in
+      compile_exp_as env ae (SR.UnboxedWord64 from_typ) e ^^
+      set_val ^^
+      get_val ^^
+      compile_shrU_const (Int64.of_int (64 - num_bits)) ^^
+      E.then_trap_with env "losing precision" ^^
+      get_val ^^
+      compile_shl_const (Int64.of_int num_bits)
+    | (Int8 as from_typ), (Int16 as to_typ)
+    | (Int16 as from_typ), (Int32 as to_typ)
+    | (Int32 as from_typ), (Int64 as to_typ) ->
+      SR.UnboxedWord64 to_typ,
+      compile_exp_as env ae (SR.UnboxedWord64 from_typ) e ^^
+      TaggedSmallWord.lsb_adjust from_typ ^^
+      TaggedSmallWord.msb_adjust to_typ
+    | (Int16 as from_typ), (Int8 as to_typ)
+    | (Int32 as from_typ), (Int16 as to_typ)
+    | (Int64 as from_typ), (Int32 as to_typ) ->
+      SR.UnboxedWord64 to_typ,
+      let num_bits = (TaggedSmallWord.bits_of_type to_typ) in
+      let set_val, get_val = new_local env "convertee" in
+      compile_exp_as env ae (SR.UnboxedWord64 from_typ)  e ^^
+      set_val ^^
+      get_val ^^
+      compile_shl_const (Int64.of_int num_bits) ^^
+      compile_shrS_const (Int64.of_int num_bits) ^^
+      get_val ^^
+      compile_eq env Type.(Prim from_typ) ^^
+      E.else_trap_with env "losing precision" ^^
+      get_val ^^
+      compile_shl_const (Int64.of_int num_bits)
+    | _ -> SR.Unreachable, todo_trap env "compile_prim_invocation" (Arrange_ir.prim p)
+    end
+
+  | SerializePrim ts, [e] ->
+    SR.Vanilla,
+    compile_exp_vanilla env ae e ^^
+    Serialization.serialize env ts ^^
+    Blob.of_ptr_size env
+
+  | DeserializePrim ts, [e] ->
+    StackRep.of_arity (List.length ts),
+    compile_exp_vanilla env ae e ^^
+    Bool.lit false ^^ (* can't recover *)
+    Serialization.deserialize_from_blob false env ts
+
+  | DeserializeOptPrim ts, [e] ->
+    SR.Vanilla,
+    compile_exp_vanilla env ae e ^^
+    Bool.lit true ^^ (* can (!) recover *)
+    Serialization.deserialize_from_blob false env ts ^^
+    begin match ts with
+    | [] ->
+      (* return some () *)
+      Opt.inject env (Tuple.compile_unit env)
+    | [t] ->
+      (* save to local, propagate error as null or return some value *)
+      let (set_val, get_val) = new_local env "val" in
+      set_val ^^
+      get_val ^^
+      compile_eq_const (Serialization.coercion_error_value env) ^^
+      E.if1 I64Type
+        (Opt.null_lit env)
+        (Opt.inject env get_val)
+    | ts ->
+      (* propagate any errors as null or return some tuples using shared code *)
+      let n = List.length ts in
+      let name = Printf.sprintf "to_opt_%i_tuple" n in
+      let args = Lib.List.table n (fun i -> (Printf.sprintf "arg%i" i, I64Type)) in
+      Func.share_code Func.Always env name args [I64Type] (fun env getters ->
+        let locals =
+          Lib.List.table n (fun i -> List.nth getters i) in
+        let rec go ls =
+          match ls with
+          | get_val::ls' ->
+            get_val ^^
+            compile_eq_const (Serialization.coercion_error_value env) ^^
+            E.if1 I64Type
+              (Opt.null_lit env)
+              (go ls')
+          | [] ->
+            Opt.inject env (Arr.lit env Tagged.T locals)
+        in
+        go locals)
+    end
+
+  | ICPerformGC, [] ->
+    SR.unit,
+    GC.collect_garbage env
+
+  | ICStableSize t, [e] ->
+    SR.UnboxedWord64 Type.Nat64,
+    E.trap_with env "Deprecated with enhanced orthogonal persistence"
+
+  (* Other prims, unary *)
+
+  | OtherPrim "array_len", [e] ->
+    SR.Vanilla,
+    compile_exp_vanilla env ae e ^^
+    Arr.len env ^^
+    BigNum.from_word64 env
+
+  | OtherPrim "text_len", [e] ->
+    SR.Vanilla, compile_exp_vanilla env ae e ^^ Text.len_nat env
+  | OtherPrim "text_iter", [e] ->
+    SR.Vanilla, compile_exp_vanilla env ae e ^^ Text.iter env
+  | OtherPrim "text_iter_done", [e] ->
+    SR.bool, compile_exp_vanilla env ae e ^^ Text.iter_done env
+  | OtherPrim "text_iter_next", [e] ->
+    SR.UnboxedWord64 Type.Char, compile_exp_vanilla env ae e ^^ Text.iter_next env
+  | OtherPrim "text_compare", [e1; e2] ->
+    SR.UnboxedWord64 Type.Int8,
+    compile_exp_vanilla env ae e1 ^^
+    compile_exp_vanilla env ae e2 ^^
+    E.call_import env "rts" "text_compare" ^^
+    TaggedSmallWord.msb_adjust Type.Int8
+  | OtherPrim "blob_compare", [e1; e2] ->
+    SR.UnboxedWord64 Type.Int8,
+    compile_exp_vanilla env ae e1 ^^
+    compile_exp_vanilla env ae e2 ^^
+    Blob.compare env None ^^
+    TaggedSmallWord.msb_adjust Type.Int8
+
+  | OtherPrim "blob_size", [e] ->
+    SR.Vanilla, compile_exp_vanilla env ae e ^^ Blob.len_nat env
+  | OtherPrim "blob_vals_iter", [e] ->
+    SR.Vanilla, compile_exp_vanilla env ae e ^^ Blob.iter env
+  | OtherPrim "blob_iter_done", [e] ->
+    SR.bool, compile_exp_vanilla env ae e ^^ Blob.iter_done env
+  | OtherPrim "blob_iter_next", [e] ->
+    SR.UnboxedWord64 Type.Nat8, (* ! *)
+    compile_exp_vanilla env ae e ^^ Blob.iter_next env
+
+  | OtherPrim "lsh_Nat", [e1; e2] ->
+    SR.Vanilla,
+    compile_exp_vanilla env ae e1 ^^
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat32) e2 ^^
+    BigNum.compile_lsh env
+
+  | OtherPrim "rsh_Nat", [e1; e2] ->
+    SR.Vanilla,
+    compile_exp_vanilla env ae e1 ^^
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat32) e2 ^^
+    BigNum.compile_rsh env
+
+  | OtherPrim "abs", [e] ->
+    SR.Vanilla,
+    compile_exp_vanilla env ae e ^^
+    BigNum.compile_abs env
+
+  | OtherPrim "fabs", [e] ->
+    SR.UnboxedFloat64,
+    compile_exp_as env ae SR.UnboxedFloat64 e ^^
+    G.i (Unary (Wasm_exts.Values.F64 F64Op.Abs))
+
+  | OtherPrim "fsqrt", [e] ->
+    SR.UnboxedFloat64,
+    compile_exp_as env ae SR.UnboxedFloat64 e ^^
+    G.i (Unary (Wasm_exts.Values.F64 F64Op.Sqrt))
+
+  | OtherPrim "fceil", [e] ->
+    SR.UnboxedFloat64,
+    compile_exp_as env ae SR.UnboxedFloat64 e ^^
+    G.i (Unary (Wasm_exts.Values.F64 F64Op.Ceil))
+
+  | OtherPrim "ffloor", [e] ->
+    SR.UnboxedFloat64,
+    compile_exp_as env ae SR.UnboxedFloat64 e ^^
+    G.i (Unary (Wasm_exts.Values.F64 F64Op.Floor))
+
+  | OtherPrim "ftrunc", [e] ->
+    SR.UnboxedFloat64,
+    compile_exp_as env ae SR.UnboxedFloat64 e ^^
+    G.i (Unary (Wasm_exts.Values.F64 F64Op.Trunc))
+
+  | OtherPrim "fnearest", [e] ->
+    SR.UnboxedFloat64,
+    compile_exp_as env ae SR.UnboxedFloat64 e ^^
+    G.i (Unary (Wasm_exts.Values.F64 F64Op.Nearest))
+
+  | OtherPrim "fmin", [e; f] ->
+    SR.UnboxedFloat64,
+    compile_exp_as env ae SR.UnboxedFloat64 e ^^
+    compile_exp_as env ae SR.UnboxedFloat64 f ^^
+    G.i (Binary (Wasm_exts.Values.F64 F64Op.Min))
+
+  | OtherPrim "fmax", [e; f] ->
+    SR.UnboxedFloat64,
+    compile_exp_as env ae SR.UnboxedFloat64 e ^^
+    compile_exp_as env ae SR.UnboxedFloat64 f ^^
+    G.i (Binary (Wasm_exts.Values.F64 F64Op.Max))
+
+  | OtherPrim "fcopysign", [e; f] ->
+    SR.UnboxedFloat64,
+    compile_exp_as env ae SR.UnboxedFloat64 e ^^
+    compile_exp_as env ae SR.UnboxedFloat64 f ^^
+    G.i (Binary (Wasm_exts.Values.F64 F64Op.CopySign))
+
+  | OtherPrim "Float->Text", [e] ->
+    SR.Vanilla,
+    compile_exp_as env ae SR.UnboxedFloat64 e ^^
+    compile_unboxed_const (TaggedSmallWord.vanilla_lit Type.Nat8 6) ^^
+    compile_unboxed_const (TaggedSmallWord.vanilla_lit Type.Nat8 0) ^^
+    E.call_import env "rts" "float_fmt"
+
+  | OtherPrim "fmtFloat->Text", [f; prec; mode] ->
+    SR.Vanilla,
+    compile_exp_as env ae SR.UnboxedFloat64 f ^^
+    compile_exp_vanilla env ae prec ^^
+    compile_exp_vanilla env ae mode ^^
+    E.call_import env "rts" "float_fmt"
+
+  | OtherPrim "fsin", [e] ->
+    SR.UnboxedFloat64,
+    compile_exp_as env ae SR.UnboxedFloat64 e ^^
+    E.call_import env "rts" "sin" (* musl *)
+
+  | OtherPrim "fcos", [e] ->
+    SR.UnboxedFloat64,
+    compile_exp_as env ae SR.UnboxedFloat64 e ^^
+    E.call_import env "rts" "cos" (* musl *)
+
+  | OtherPrim "ftan", [e] ->
+    SR.UnboxedFloat64,
+    compile_exp_as env ae SR.UnboxedFloat64 e ^^
+    E.call_import env "rts" "tan" (* musl *)
+
+  | OtherPrim "fasin", [e] ->
+    SR.UnboxedFloat64,
+    compile_exp_as env ae SR.UnboxedFloat64 e ^^
+    E.call_import env "rts" "asin" (* musl *)
+
+  | OtherPrim "facos", [e] ->
+    SR.UnboxedFloat64,
+    compile_exp_as env ae SR.UnboxedFloat64 e ^^
+    E.call_import env "rts" "acos" (* musl *)
+
+  | OtherPrim "fatan", [e] ->
+    SR.UnboxedFloat64,
+    compile_exp_as env ae SR.UnboxedFloat64 e ^^
+    E.call_import env "rts" "atan" (* musl *)
+
+  | OtherPrim "fatan2", [y; x] ->
+    SR.UnboxedFloat64,
+    compile_exp_as env ae SR.UnboxedFloat64 y ^^
+    compile_exp_as env ae SR.UnboxedFloat64 x ^^
+    E.call_import env "rts" "atan2" (* musl *)
+
+  | OtherPrim "fexp", [e] ->
+    SR.UnboxedFloat64,
+    compile_exp_as env ae SR.UnboxedFloat64 e ^^
+    E.call_import env "rts" "exp" (* musl *)
+
+  | OtherPrim "flog", [e] ->
+    SR.UnboxedFloat64,
+    compile_exp_as env ae SR.UnboxedFloat64 e ^^
+    E.call_import env "rts" "log" (* musl *)
+
+  (* Other prims, nullary *)
+
+  | SystemTimePrim, [] ->
+    SR.UnboxedWord64 Type.Nat64,
+    IC.get_system_time env
+
+  | OtherPrim "call_perform_status", [] ->
+    SR.UnboxedWord64 Type.Nat32,
+    IC.get_call_perform_status env ^^
+    TaggedSmallWord.msb_adjust Type.Nat32
+
+  | OtherPrim "call_perform_message", [] ->
+    SR.Vanilla,
+    IC.get_call_perform_message env
+
+  | OtherPrim "rts_version", [] ->
+    SR.Vanilla,
+    E.call_import env "rts" "version"
+
+  | OtherPrim "rts_heap_size", [] ->
+    SR.Vanilla,
+    Heap.get_heap_size env ^^ Prim.prim_word64toNat env
+
+  | OtherPrim "rts_memory_size", [] ->
+    SR.Vanilla,
+    Heap.get_memory_size ^^ BigNum.from_word64 env
+
+  | OtherPrim "rts_total_allocation", [] ->
+    SR.Vanilla,
+    Heap.get_total_allocation env ^^ BigNum.from_word64 env
+
+  | OtherPrim "rts_reclaimed", [] ->
+    SR.Vanilla,
+    Heap.get_reclaimed env ^^ BigNum.from_word64 env
+
+  | OtherPrim "rts_max_live_size", [] ->
+    SR.Vanilla,
+    Heap.get_max_live_size env ^^ BigNum.from_word64 env
+
+  | OtherPrim "rts_max_stack_size", [] ->
+    SR.Vanilla,
+    Stack.get_max_stack_size env ^^ Prim.prim_word64toNat env
+
+  | OtherPrim "rts_callback_table_count", [] ->
+    SR.Vanilla,
+    ContinuationTable.count env ^^ Prim.prim_word64toNat env
+
+  | OtherPrim "rts_callback_table_size", [] ->
+    SR.Vanilla,
+    ContinuationTable.size env ^^ Prim.prim_word64toNat env
+
+  | OtherPrim "rts_mutator_instructions", [] ->
+    SR.Vanilla,
+    GC.get_mutator_instructions env ^^ BigNum.from_word64 env
+
+  | OtherPrim "rts_collector_instructions", [] ->
+    SR.Vanilla,
+    GC.get_collector_instructions env ^^ BigNum.from_word64 env
+
+  | OtherPrim "rts_upgrade_instructions", [] ->
+    SR.Vanilla,
+    UpgradeStatistics.get_upgrade_instructions env ^^ BigNum.from_word64 env
+
+  | OtherPrim "rts_stable_memory_size", [] ->
+    SR.Vanilla,
+    StableMem.stable64_size env ^^ BigNum.from_word64 env
+
+  | OtherPrim "rts_logical_stable_memory_size", [] ->
+    SR.Vanilla,
+    StableMem.get_mem_size env ^^ BigNum.from_word64 env
+
+  (* Regions *)
+
+  | OtherPrim "regionNew", [] ->
+    SR.Vanilla,
+    Region.new_ env
+
+  | OtherPrim "regionId", [e0] ->
+     SR.Vanilla,
+     compile_exp_as env ae SR.Vanilla e0 ^^
+     Region.id env ^^
+     BigNum.from_word64 env
+
+  | OtherPrim ("regionGrow"), [e0; e1] ->
+    SR.UnboxedWord64 Type.Nat64,
+    compile_exp_as env ae SR.Vanilla e0 ^^
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e1 ^^
+    Region.grow env
+
+  | OtherPrim "regionSize", [e0] ->
+    SR.UnboxedWord64 Type.Nat64,
+    compile_exp_as env ae SR.Vanilla e0 ^^
+    Region.size env
+
+  | OtherPrim ("regionLoadBlob"), [e0; e1; e2] ->
+    SR.Vanilla,
+    compile_exp_as env ae SR.Vanilla e0 ^^
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e1 ^^
+    compile_exp_as env ae SR.Vanilla e2 ^^
+    BigNum.to_word64_with env (Blob.lit env Tagged.T "Blob size out of bounds") ^^
+    Region.load_blob env
+
+  | OtherPrim ("regionStoreBlob"), [e0; e1; e2] ->
+    SR.unit,
+    compile_exp_as env ae SR.Vanilla e0 ^^
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e1 ^^
+    compile_exp_as env ae SR.Vanilla e2 ^^
+    Region.store_blob env
+
+  | OtherPrim (("regionLoadNat8" | "regionLoadInt8" as p)), [e0; e1] ->
+    let ty = Type.(if p = "regionLoadNat8" then Nat8 else Int8) in
+    SR.UnboxedWord64 ty,
+    compile_exp_as env ae SR.Vanilla e0 ^^
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e1 ^^
+    Region.load_word8 env ^^
+    G.i (Convert (Wasm_exts.Values.I64 I64Op.(if ty = Type.Nat8 then ExtendUI32 else ExtendSI32))) ^^
+    TaggedSmallWord.msb_adjust ty
+
+  | OtherPrim (("regionStoreNat8" | "regionStoreInt8") as p), [e0; e1; e2] ->
+    let ty = Type.(if p = "regionStoreNat8" then Nat8 else Int8) in
+    SR.unit,
+    compile_exp_as env ae SR.Vanilla e0 ^^
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e1 ^^
+    compile_exp_as env ae (SR.UnboxedWord64 ty) e2 ^^
+    TaggedSmallWord.lsb_adjust ty ^^
+    G.i (Convert (Wasm_exts.Values.I32 I32Op.WrapI64)) ^^
+    Region.store_word8 env
+
+  | OtherPrim (("regionLoadNat16" | "regionLoadInt16") as p), [e0; e1] ->
+    let ty = Type.(if p = "regionLoadNat16" then Nat16 else Int16) in
+    SR.UnboxedWord64 ty,
+    compile_exp_as env ae SR.Vanilla e0 ^^
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e1 ^^
+    Region.load_word16 env ^^
+    G.i (Convert (Wasm_exts.Values.I64 I64Op.(if ty = Type.Nat16 then ExtendUI32 else ExtendSI32))) ^^
+    TaggedSmallWord.msb_adjust ty
+
+  | OtherPrim (("regionStoreNat16" | "regionStoreInt16") as p), [e0; e1; e2] ->
+    let ty = Type.(if p = "regionStoreNat16" then Nat16 else Int16) in
+    SR.unit,
+    compile_exp_as env ae SR.Vanilla e0 ^^
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e1 ^^
+    compile_exp_as env ae (SR.UnboxedWord64 ty) e2 ^^
+    TaggedSmallWord.lsb_adjust ty ^^
+    G.i (Convert (Wasm_exts.Values.I32 I32Op.WrapI64)) ^^
+    Region.store_word16 env
+
+  | OtherPrim (("regionLoadNat32" | "regionLoadInt32") as p), [e0; e1] ->
+    let ty = Type.(if p = "regionLoadNat32" then Nat32 else Int32) in
+    SR.UnboxedWord64 ty,
+    compile_exp_as env ae SR.Vanilla e0 ^^
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e1 ^^
+    Region.load_word32 env ^^
+    G.i (Convert (Wasm_exts.Values.I64 I64Op.(if ty = Type.Nat32 then ExtendUI32 else ExtendSI32))) ^^
+    TaggedSmallWord.msb_adjust ty
+
+  | OtherPrim (("regionStoreNat32" | "regionStoreInt32") as p), [e0; e1; e2] ->
+    let ty = Type.(if p = "regionStoreNat32" then Nat32 else Int32) in
+    SR.unit,
+    compile_exp_as env ae SR.Vanilla e0 ^^
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e1 ^^
+    compile_exp_as env ae (SR.UnboxedWord64 ty) e2 ^^
+    TaggedSmallWord.lsb_adjust ty ^^
+    G.i (Convert (Wasm_exts.Values.I32 I32Op.WrapI64)) ^^
+    Region.store_word32 env
+
+  | OtherPrim (("regionLoadNat64" | "regionLoadInt64") as p), [e0; e1] ->
+    let ty = Type.(if p = "regionLoadNat64" then Nat64 else Int64) in
+    SR.UnboxedWord64 ty,
+    compile_exp_as env ae SR.Vanilla e0 ^^
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e1 ^^
+    Region.load_word64 env
+
+  | OtherPrim (("regionStoreNat64" | "regionStoreInt64") as p), [e0; e1; e2] ->
+    let ty = Type.(if p = "regionStoreNat64" then Nat64 else Int64) in
+    SR.unit,
+    compile_exp_as env ae SR.Vanilla e0 ^^
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e1 ^^
+    compile_exp_as env ae (SR.UnboxedWord64 ty) e2 ^^
+    Region.store_word64 env
+
+  | OtherPrim ("regionLoadFloat"), [e0; e1] ->
+    SR.UnboxedFloat64,
+    compile_exp_as env ae SR.Vanilla e0 ^^
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e1 ^^
+    Region.load_float64 env
+
+  | OtherPrim ("regionStoreFloat"), [e0; e1; e2] ->
+    SR.unit,
+    compile_exp_as env ae SR.Vanilla e0 ^^
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e1 ^^
+    compile_exp_as env ae SR.UnboxedFloat64 e2 ^^
+    Region.store_float64 env
+
+  (* Other prims, unary *)
+
+  | OtherPrim "global_timer_set", [e] ->
+    SR.UnboxedWord64 Type.Nat64,
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e ^^
+    IC.system_call env "global_timer_set"
+
+  | OtherPrim "is_controller", [e] ->
+    SR.Vanilla,
+    let set_principal, get_principal = new_local env "principal" in
+    compile_exp_vanilla env ae e ^^
+    set_principal ^^ get_principal ^^
+    Blob.payload_ptr_unskewed env ^^
+    get_principal ^^
+    Blob.len env ^^
+    IC.is_controller env
+
+  | OtherPrim "canister_version", [] ->
+    SR.UnboxedWord64 Type.Nat64,
+    IC.canister_version env
+
+  | OtherPrim "crc32Hash", [e] ->
+    SR.UnboxedWord64 Type.Nat32,
+    compile_exp_vanilla env ae e ^^
+    E.call_import env "rts" "compute_crc32" ^^
+    G.i (Convert (Wasm_exts.Values.I64 I64Op.ExtendUI32)) ^^
+    TaggedSmallWord.msb_adjust Type.Nat32
+
+  | OtherPrim "idlHash", [e] ->
+    SR.Vanilla,
+    E.trap_with env "idlHash only implemented in interpreter"
+
+
+  | OtherPrim "popcnt8", [e] ->
+    SR.UnboxedWord64 Type.Nat8,
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat8) e ^^
+    G.i (Unary (Wasm_exts.Values.I64 I64Op.Popcnt)) ^^
+    TaggedSmallWord.msb_adjust Type.Nat8
+  | OtherPrim "popcnt16", [e] ->
+    SR.UnboxedWord64 Type.Nat16,
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat16) e ^^
+    G.i (Unary (Wasm_exts.Values.I64 I64Op.Popcnt)) ^^
+    TaggedSmallWord.msb_adjust Type.Nat16
+  | OtherPrim "popcntInt8", [e] ->
+    SR.UnboxedWord64 Type.Int8,
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Int8) e ^^
+    G.i (Unary (Wasm_exts.Values.I64 I64Op.Popcnt)) ^^
+    TaggedSmallWord.msb_adjust Type.Int8
+  | OtherPrim "popcntInt16", [e] ->
+    SR.UnboxedWord64 Type.Int16,
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Int16) e ^^
+    G.i (Unary (Wasm_exts.Values.I64 I64Op.Popcnt)) ^^
+    TaggedSmallWord.msb_adjust Type.Int16
+  | OtherPrim "popcnt32", [e] ->
+     SR.UnboxedWord64 Type.Nat32,
+     compile_exp_as env ae (SR.UnboxedWord64 Type.Nat32) e ^^
+     G.i (Unary (Wasm_exts.Values.I64 I64Op.Popcnt)) ^^
+     TaggedSmallWord.msb_adjust Type.Nat32
+  | OtherPrim "popcntInt32", [e] ->
+     SR.UnboxedWord64 Type.Int32,
+     compile_exp_as env ae (SR.UnboxedWord64 Type.Int32) e ^^
+     G.i (Unary (Wasm_exts.Values.I64 I64Op.Popcnt)) ^^
+     TaggedSmallWord.msb_adjust Type.Int32
+  | OtherPrim "popcnt64", [e] ->
+     SR.UnboxedWord64 Type.Nat64,
+     compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e ^^
+     G.i (Unary (Wasm_exts.Values.I64 I64Op.Popcnt))
+  | OtherPrim "popcntInt64", [e] ->
+     SR.UnboxedWord64 Type.Int64,
+     compile_exp_as env ae (SR.UnboxedWord64 Type.Int64) e ^^
+     G.i (Unary (Wasm_exts.Values.I64 I64Op.Popcnt))
+  | OtherPrim "clz8", [e] ->
+     SR.UnboxedWord64 Type.Nat8,
+     compile_exp_as env ae (SR.UnboxedWord64 Type.Nat8) e ^^
+     TaggedSmallWord.clz_kernel Type.Nat8
+  | OtherPrim "clz16", [e] ->
+     SR.UnboxedWord64 Type.Nat16,
+     compile_exp_as env ae (SR.UnboxedWord64 Type.Nat16) e ^^
+     TaggedSmallWord.clz_kernel Type.Nat16
+  | OtherPrim "clzInt8", [e] ->
+     SR.UnboxedWord64 Type.Int8,
+     compile_exp_as env ae (SR.UnboxedWord64 Type.Int8) e ^^
+     TaggedSmallWord.clz_kernel Type.Int8
+  | OtherPrim "clzInt16", [e] ->
+     SR.UnboxedWord64 Type.Int16,
+     compile_exp_as env ae (SR.UnboxedWord64 Type.Int16) e ^^
+     TaggedSmallWord.clz_kernel Type.Int16
+  | OtherPrim "clz32", [e] ->
+     SR.UnboxedWord64 Type.Nat32,
+     compile_exp_as env ae (SR.UnboxedWord64 Type.Nat32) e ^^
+     TaggedSmallWord.clz_kernel Type.Nat32
+  | OtherPrim "clzInt32", [e] ->
+     SR.UnboxedWord64 Type.Int32,
+     compile_exp_as env ae (SR.UnboxedWord64 Type.Int32) e ^^
+     TaggedSmallWord.clz_kernel Type.Int32
+  | OtherPrim "clz64", [e] ->
+     SR.UnboxedWord64 Type.Nat64,
+     compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e ^^
+     G.i (Unary (Wasm_exts.Values.I64 I64Op.Clz))
+  | OtherPrim "clzInt64", [e] ->
+     SR.UnboxedWord64 Type.Int64,
+     compile_exp_as env ae (SR.UnboxedWord64 Type.Int64) e ^^
+     G.i (Unary (Wasm_exts.Values.I64 I64Op.Clz))
+  | OtherPrim "ctz8", [e] ->
+     SR.UnboxedWord64 Type.Nat8,
+     compile_exp_as env ae (SR.UnboxedWord64 Type.Nat8) e ^^
+     TaggedSmallWord.ctz_kernel Type.Nat8
+  | OtherPrim "ctz16", [e] ->
+     SR.UnboxedWord64 Type.Nat16,
+     compile_exp_as env ae (SR.UnboxedWord64 Type.Nat16) e ^^
+     TaggedSmallWord.ctz_kernel Type.Nat16
+  | OtherPrim "ctzInt8", [e] ->
+     SR.UnboxedWord64 Type.Int8,
+     compile_exp_as env ae (SR.UnboxedWord64 Type.Int8) e ^^
+     TaggedSmallWord.ctz_kernel Type.Int8
+  | OtherPrim "ctzInt16", [e] ->
+     SR.UnboxedWord64 Type.Int16,
+     compile_exp_as env ae (SR.UnboxedWord64 Type.Int16) e ^^
+     TaggedSmallWord.ctz_kernel Type.Int16
+  | OtherPrim "ctz32", [e] ->
+    SR.UnboxedWord64 Type.Nat32,
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat32) e ^^
+    TaggedSmallWord.ctz_kernel Type.Nat32
+  | OtherPrim "ctzInt32", [e] ->
+    SR.UnboxedWord64 Type.Int32,
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Int32) e ^^
+    TaggedSmallWord.ctz_kernel Type.Int32
+  | OtherPrim "ctz64", [e] ->
+    SR.UnboxedWord64 Type.Nat64,
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e ^^
+    G.i (Unary (Wasm_exts.Values.I64 I64Op.Ctz))
+  | OtherPrim "ctzInt64", [e] ->
+    SR.UnboxedWord64 Type.Int64,
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Int64) e ^^
+    G.i (Unary (Wasm_exts.Values.I64 I64Op.Ctz))
+
+  | OtherPrim "conv_Char_Text", [e] ->
+    SR.Vanilla,
+    compile_exp_vanilla env ae e ^^
+    Text.prim_showChar env
+
+  | OtherPrim "char_to_upper", [e] ->
+    compile_char_to_char_rts env ae e "char_to_upper"
+
+  | OtherPrim "char_to_lower", [e] ->
+    compile_char_to_char_rts env ae e "char_to_lower"
+
+  | OtherPrim "char_is_whitespace", [e] ->
+    compile_char_to_bool_rts env ae e "char_is_whitespace"
+
+  | OtherPrim "char_is_lowercase", [e] ->
+    compile_char_to_bool_rts env ae e "char_is_lowercase"
+
+  | OtherPrim "char_is_uppercase", [e] ->
+    compile_char_to_bool_rts env ae e "char_is_uppercase"
+
+  | OtherPrim "char_is_alphabetic", [e] ->
+    compile_char_to_bool_rts env ae e "char_is_alphabetic"
+
+  | OtherPrim "print", [e] ->
+    SR.unit,
+    compile_exp_vanilla env ae e ^^
+    IC.print_text env
+
+  | OtherPrim "text_lowercase", [e] ->
+    SR.Vanilla,
+    compile_exp_vanilla env ae e ^^
+    Text.lowercase env
+
+  | OtherPrim "text_uppercase", [e] ->
+    SR.Vanilla,
+    compile_exp_vanilla env ae e ^^
+    Text.uppercase env
+
+  | OtherPrim "performanceCounter", [e] ->
+    (SR.UnboxedWord64 Type.Nat64),
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat32) e ^^
+    IC.performance_counter env
+
+  | OtherPrim "trap", [e] ->
+    SR.Unreachable,
+    compile_exp_vanilla env ae e ^^
+    IC.trap_text env
+
+  | OtherPrim "principalOfBlob", e ->
+    const_sr SR.Vanilla (Blob.copy env Tagged.B Tagged.P)
+  | OtherPrim "blobOfPrincipal", e ->
+    const_sr SR.Vanilla (Blob.copy env Tagged.P Tagged.B)
+  | OtherPrim "principalOfActor", e ->
+    const_sr SR.Vanilla (Blob.copy env Tagged.A Tagged.P)
+
+  | OtherPrim "blobToArray", e ->
+    const_sr SR.Vanilla (Arr.ofBlob env Tagged.I)
+  | OtherPrim "blobToArrayMut", e ->
+    const_sr SR.Vanilla (Arr.ofBlob env Tagged.M)
+
+  | OtherPrim ("arrayToBlob" | "arrayMutToBlob"), e ->
+    const_sr SR.Vanilla (Arr.toBlob env)
+
+  | OtherPrim (("stableMemoryLoadNat32" | "stableMemoryLoadInt32") as p), [e] ->
+    let ty = Type.(if p = "stableMemoryLoadNat32" then Nat32 else Int32) in
+    SR.UnboxedWord64 ty,
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e ^^
+    StableMemoryInterface.load_word32 env ^^
+    G.i (Convert (Wasm_exts.Values.I64 I64Op.(if ty = Type.Nat32 then ExtendUI32 else ExtendSI32))) ^^
+    TaggedSmallWord.msb_adjust ty
+
+  | OtherPrim (("stableMemoryStoreNat32" | "stableMemoryStoreInt32") as p), [e1; e2] ->
+    let ty = Type.(if p = "stableMemoryStoreNat32" then Nat32 else Int32) in
+    SR.unit,
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e1 ^^
+    compile_exp_as env ae (SR.UnboxedWord64 ty) e2 ^^
+    TaggedSmallWord.lsb_adjust ty ^^
+    G.i (Convert (Wasm_exts.Values.I32 I32Op.WrapI64)) ^^
+    StableMemoryInterface.store_word32 env
+
+  | OtherPrim (("stableMemoryLoadNat8" | "stableMemoryLoadInt8") as p), [e] ->
+    let ty = Type.(if p = "stableMemoryLoadNat8" then Nat8 else Int8) in
+    SR.UnboxedWord64 ty,
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e ^^
+    StableMemoryInterface.load_word8 env ^^
+    G.i (Convert (Wasm_exts.Values.I64 I64Op.(if ty = Type.Nat8 then ExtendUI32 else ExtendSI32))) ^^
+    TaggedSmallWord.msb_adjust ty
+
+  (* Other prims, binary *)
+
+  | OtherPrim (("stableMemoryStoreNat8" | "stableMemoryStoreInt8") as p), [e1; e2] ->
+    let ty = Type.(if p = "stableMemoryStoreNat8" then Nat8 else Int8) in
+    SR.unit,
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e1 ^^
+    compile_exp_as env ae (SR.UnboxedWord64 ty) e2 ^^
+    TaggedSmallWord.lsb_adjust ty ^^
+    G.i (Convert (Wasm_exts.Values.I32 I32Op.WrapI64)) ^^
+    StableMemoryInterface.store_word8 env
+
+  | OtherPrim (("stableMemoryLoadNat16" | "stableMemoryLoadInt16") as p), [e] ->
+    let ty = Type.(if p = "stableMemoryLoadNat16" then Nat16 else Int16) in
+    SR.UnboxedWord64 ty,
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e ^^
+    StableMemoryInterface.load_word16 env ^^
+    G.i (Convert (Wasm_exts.Values.I64 I64Op.(if ty = Type.Nat16 then ExtendUI32 else ExtendSI32))) ^^
+    TaggedSmallWord.msb_adjust ty
+
+  | OtherPrim (("stableMemoryStoreNat16" | "stableMemoryStoreInt16") as p), [e1; e2] ->
+    let ty = Type.(if p = "stableMemoryStoreNat16" then Nat16 else Int16) in
+    SR.unit,
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e1 ^^
+    compile_exp_as env ae (SR.UnboxedWord64 ty) e2 ^^
+    TaggedSmallWord.lsb_adjust ty ^^
+    G.i (Convert (Wasm_exts.Values.I32 I32Op.WrapI64)) ^^
+    StableMemoryInterface.store_word16 env
+
+  | OtherPrim (("stableMemoryLoadNat64" | "stableMemoryLoadInt64") as p), [e] ->
+    let ty = Type.(if p = "stableMemoryLoadNat64" then Nat64 else Int64) in
+    SR.UnboxedWord64 ty,
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e ^^
+    StableMemoryInterface.load_word64 env
+
+  | OtherPrim (("stableMemoryStoreNat64" | "stableMemoryStoreInt64") as p), [e1; e2] ->
+    let ty = Type.(if p = "stableMemoryStoreNat64" then Nat64 else Int64) in
+    SR.unit,
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e1 ^^
+    compile_exp_as env ae (SR.UnboxedWord64 ty) e2 ^^
+    StableMemoryInterface.store_word64 env
+
+  | OtherPrim "stableMemoryLoadFloat", [e] ->
+    SR.UnboxedFloat64,
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e ^^
+    StableMemoryInterface.load_float64 env
+
+  | OtherPrim "stableMemoryStoreFloat", [e1; e2] ->
+    SR.unit,
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e1 ^^
+    compile_exp_as env ae SR.UnboxedFloat64 e2 ^^
+    StableMemoryInterface.store_float64 env
+
+  | OtherPrim "stableMemoryLoadBlob", [e1; e2] ->
+    SR.Vanilla,
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e1 ^^
+    compile_exp_as env ae SR.Vanilla e2 ^^
+    BigNum.to_word64_with env (Blob.lit env Tagged.T "Blob size out of bounds") ^^
+    StableMemoryInterface.load_blob env
+
+  | OtherPrim "stableMemoryStoreBlob", [e1; e2] ->
+    SR.unit,
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e1 ^^
+    compile_exp_as env ae SR.Vanilla e2 ^^
+    StableMemoryInterface.store_blob env
+
+  | OtherPrim "stableMemorySize", [] ->
+    SR.UnboxedWord64 Type.Nat64,
+    StableMemoryInterface.size env
+
+  | OtherPrim "stableMemoryGrow", [e] ->
+    SR.UnboxedWord64 Type.Nat64,
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat64) e ^^
+    StableMemoryInterface.grow env
+
+  | OtherPrim "stableVarQuery", [] ->
+    SR.Vanilla,
+    IC.get_self_reference env ^^
+    IC.actor_public_field env Type.(motoko_stable_var_info_fld.lab)
+
+  (* Other prims, binary *)
+  | OtherPrim "Array.init", [_;_] ->
+    const_sr SR.Vanilla (Arr.init env)
+  | OtherPrim "Array.tabulate", [_;_] ->
+    const_sr SR.Vanilla (Arr.tabulate env)
+  | OtherPrim "btst8", [_;_] ->
+    (* TODO: btstN returns Bool, not a small value *)
+    const_sr (SR.UnboxedWord64 Type.Nat8) (TaggedSmallWord.btst_kernel env Type.Nat8)
+  | OtherPrim "btst16", [_;_] ->
+    const_sr (SR.UnboxedWord64 Type.Nat16) (TaggedSmallWord.btst_kernel env Type.Nat16)
+  | OtherPrim "btstInt8", [_;_] ->
+    const_sr (SR.UnboxedWord64 Type.Int8) (TaggedSmallWord.btst_kernel env Type.Int8)
+  | OtherPrim "btstInt16", [_;_] ->
+    const_sr (SR.UnboxedWord64 Type.Int16) (TaggedSmallWord.btst_kernel env Type.Int16)
+  | OtherPrim "btst32", [_;_] ->
+     const_sr (SR.UnboxedWord64 Type.Nat32) (TaggedSmallWord.btst_kernel env Type.Nat32)
+  | OtherPrim "btstInt32", [_;_] ->
+     const_sr (SR.UnboxedWord64 Type.Int32) (TaggedSmallWord.btst_kernel env Type.Int32) (* ! *)
+  | OtherPrim "btst64", [_;_] ->
+    const_sr (SR.UnboxedWord64 Type.Nat64) (Word64.btst_kernel env)
+  | OtherPrim "btstInt64", [_;_] ->
+    const_sr (SR.UnboxedWord64 Type.Int64) (Word64.btst_kernel env)
+
+  | OtherPrim "setCandidLimits", [e1; e2; e3] ->
+    SR.unit,
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat32) e1 ^^
+    TaggedSmallWord.lsb_adjust Type.Nat32 ^^
+    Serialization.Registers.set_value_numerator env ^^
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat32) e2 ^^
+    TaggedSmallWord.lsb_adjust Type.Nat32 ^^
+    Serialization.Registers.set_value_denominator env ^^
+    Serialization.Registers.get_value_denominator env ^^
+    E.else_trap_with env "Candid limit denominator cannot be zero" ^^
+    compile_exp_as env ae (SR.UnboxedWord64 Type.Nat32) e3 ^^
+    TaggedSmallWord.lsb_adjust Type.Nat32 ^^
+    Serialization.Registers.set_value_bias env
+
+  | OtherPrim "getCandidLimits", [] ->
+    SR.UnboxedTuple 3,
+    Serialization.Registers.get_value_numerator env ^^
+    TaggedSmallWord.msb_adjust Type.Nat32 ^^
+    TaggedSmallWord.tag env Type.Nat32 ^^
+    Serialization.Registers.get_value_denominator env ^^
+    TaggedSmallWord.msb_adjust Type.Nat32 ^^
+    TaggedSmallWord.tag env Type.Nat32 ^^
+    Serialization.Registers.get_value_bias env ^^
+    TaggedSmallWord.msb_adjust Type.Nat32 ^^
+    TaggedSmallWord.tag env Type.Nat32
+
+  (* Coercions for abstract types *)
+  | CastPrim (_,_), [e] ->
+    compile_exp env ae e
+
+  | DecodeUtf8, [_] ->
+    const_sr SR.Vanilla (Text.of_blob env)
+  | EncodeUtf8, [_] ->
+    const_sr SR.Vanilla (Text.to_blob env)
+
+  (* textual to bytes *)
+  | BlobOfIcUrl, [_] ->
+    const_sr SR.Vanilla (E.call_import env "rts" "blob_of_principal")
+  (* The other direction *)
+  | IcUrlOfBlob, [_] ->
+    const_sr SR.Vanilla (E.call_import env "rts" "principal_of_blob")
+
+  (* Actor ids are blobs in the RTS *)
+  | ActorOfIdBlob _, [e] ->
+    SR.Vanilla,
+    let (set_blob, get_blob) = new_local env "blob" in
+    compile_exp_vanilla env ae e ^^
+    set_blob ^^
+    get_blob ^^
+    Blob.len env ^^
+    compile_unboxed_const 29L ^^
+    compile_comparison I64Op.LeU ^^
+    E.else_trap_with env "blob too long for actor principal" ^^
+    get_blob ^^
+    Blob.copy env Tagged.B Tagged.A
+
+  | SelfRef _, [] ->
+    SR.Vanilla, IC.get_self_reference env
+
+  | ICArgDataPrim, [] ->
+    SR.Vanilla, IC.arg_data env
+
+  | ICReplyPrim ts, [e] ->
+    SR.unit, begin match E.mode env with
+    | Flags.ICMode | Flags.RefMode ->
+      compile_exp_vanilla env ae e ^^
+      (* TODO: We can try to avoid the boxing and pass the arguments to
+        serialize individually *)
+      Serialization.serialize env ts ^^
+      IC.reply_with_data env
+    | _ ->
+      E.trap_with env (Printf.sprintf "cannot reply when running locally")
+    end
+
+  | ICRejectPrim, [e] ->
+    SR.unit, IC.reject env (compile_exp_vanilla env ae e)
+
+  | ICCallerPrim, [] ->
+    SR.Vanilla, IC.caller env
+
+  | ICCallPrim, [f;e;k;r;c] ->
+    SR.unit, begin
+    (* TBR: Can we do better than using the notes? *)
+    let _, _, _, ts1, _ = Type.as_func f.note.Note.typ in
+    let _, _, _, ts2, _ = Type.as_func k.note.Note.typ in
+    let (set_meth_pair, get_meth_pair) = new_local env "meth_pair" in
+    let (set_arg, get_arg) = new_local env "arg" in
+    let (set_k, get_k) = new_local env "k" in
+    let (set_r, get_r) = new_local env "r" in
+    let (set_c, get_c) = new_local env "c" in
+    let add_cycles = Internals.add_cycles env ae in
+    compile_exp_vanilla env ae f ^^ set_meth_pair ^^
+    compile_exp_vanilla env ae e ^^ set_arg ^^
+    compile_exp_vanilla env ae k ^^ set_k ^^
+    compile_exp_vanilla env ae r ^^ set_r ^^
+    compile_exp_vanilla env ae c ^^ set_c ^^
+    FuncDec.ic_call env ts1 ts2 get_meth_pair get_arg get_k get_r get_c add_cycles
+    end
+  | ICCallRawPrim, [p;m;a;k;r;c] ->
+    SR.unit, begin
+    let set_meth_pair, get_meth_pair = new_local env "meth_pair" in
+    let set_arg, get_arg = new_local env "arg" in
+    let set_k, get_k = new_local env "k" in
+    let set_r, get_r = new_local env "r" in
+    let set_c, get_c = new_local env "c" in
+    let add_cycles = Internals.add_cycles env ae in
+    compile_exp_vanilla env ae p ^^
+    compile_exp_vanilla env ae m ^^ Text.to_blob env ^^
+    Tagged.load_forwarding_pointer env ^^
+    Tuple.from_stack env 2 ^^ set_meth_pair ^^
+    compile_exp_vanilla env ae a ^^ set_arg ^^
+    compile_exp_vanilla env ae k ^^ set_k ^^
+    compile_exp_vanilla env ae r ^^ set_r ^^
+    compile_exp_vanilla env ae c ^^ set_c ^^
+    FuncDec.ic_call_raw env get_meth_pair get_arg get_k get_r get_c add_cycles
+    end
+
+  | ICMethodNamePrim, [] ->
+    SR.Vanilla, IC.method_name env
+
+  | ICStableRead ty, [] ->
+    SR.Vanilla,
+    Persistence.load env ty
+  | ICStableWrite ty, [] ->
+    SR.unit,
+    Persistence.save env ty
+
+  (* Cycles *)
+  | SystemCyclesBalancePrim, [] ->
+    SR.Vanilla, Cycles.balance env
+  | SystemCyclesAddPrim, [e1] ->
+    SR.unit, compile_exp_vanilla env ae e1 ^^ Cycles.add env
+  | SystemCyclesAcceptPrim, [e1] ->
+    SR.Vanilla, compile_exp_vanilla env ae e1 ^^ Cycles.accept env
+  | SystemCyclesAvailablePrim, [] ->
+    SR.Vanilla, Cycles.available env
+  | SystemCyclesRefundedPrim, [] ->
+    SR.Vanilla, Cycles.refunded env
+
+  | SetCertifiedData, [e1] ->
+    SR.unit, compile_exp_vanilla env ae e1 ^^ IC.set_certified_data env
+  | GetCertificate, [] ->
+    SR.Vanilla,
+    IC.get_certificate env
+
+  (* Unknown prim *)
+  | _ -> SR.Unreachable, todo_trap env "compile_prim_invocation" (Arrange_ir.prim p)
+  end
+
+(* Compile, infer and return stack representation *)
+and compile_exp (env : E.t) ae exp =
+  compile_exp_with_hint env ae None exp
+
+(* Compile to given stack representation *)
+and compile_exp_as env ae sr_out e =
+  let sr_in, code = compile_exp_with_hint env ae (Some sr_out) e in
+  code ^^ StackRep.adjust env sr_in sr_out
+
+(* True when the switch has exactly one case, a tag pattern, and the
+   scrutinee's type is a single-label variant with that same label —
+   i.e. the tag test is statically known to succeed. *)
+and single_case e (cs : Ir.case list) =
+  match cs, e.note.Note.typ with
+  | [{it={pat={it=TagP (l, _);_}; _}; _}], Type.(Variant [{lab; _}]) -> l = lab
+  | _ -> false
+
+(* A tag pattern with the empty label "": used below as a marker for a
+   tag match that is statically known to succeed (see fill_pat's TagP ("", _)
+   case, which skips the runtime tag test). *)
+and known_tag_pat p = TagP ("", p)
+
+(* Rewrite the case list of a switch when the tag test of the second case
+   is statically redundant. *)
+and simplify_cases e (cs : Ir.case list) =
+  match cs, e.note.Note.typ with
+  (* for a 2-cased variant type, the second comparison can be omitted when the first pattern
+     (with irrefutable subpattern) didn't match, and the pattern types line up *)
+  | [{it={pat={it=TagP (l1, ip); _}; _}; _} as c1; {it={pat={it=TagP (l2, pat'); _} as pat2; exp}; _} as c2], Type.(Variant [{lab=el1; _}; {lab=el2; _}])
+       when Ir_utils.is_irrefutable ip
+            && (l1 = el1 || l1 = el2)
+            && (l2 = el1 || l2 = el2) ->
+     [c1; {c2 with it = {exp; pat = {pat2 with it = known_tag_pat pat'}}}]
+  | _ -> cs
+
+(* Compile, infer and return stack representation, taking the hint into account.
+   sr_hint (SR.t option) is the representation the caller would prefer;
+   branching constructs (IfE, SwitchE) use it to avoid a join over branches.
+   Constant expressions short-circuit to SR.Const with no generated code. *)
+and compile_exp_with_hint (env : E.t) ae sr_hint exp =
+  (fun (sr,code) -> (sr, G.with_region exp.at code)) @@
+  if exp.note.Note.const
+  then let (c, fill) = compile_const_exp env ae exp in fill env ae; (SR.Const c, G.nop)
+  else match exp.it with
+  | PrimE (p, es) when List.exists (fun e -> Type.is_non e.note.Note.typ) es ->
+    (* Handle dead code separately, so that we can rely on useful type
+       annotations below *)
+    SR.Unreachable,
+    G.concat_map (compile_exp_ignore env ae) es ^^
+    G.i Unreachable
+
+  | PrimE (p, es) ->
+    compile_prim_invocation (env : E.t) ae p es exp.at
+  | VarE (_, var) ->
+    Var.get_val env ae var
+  | AssignE (e1,e2) ->
+    SR.unit,
+    (* compile_lexp yields: code to run before the rhs, the rep the store
+       expects, and the store code itself *)
+    let (prepare_code, sr, store_code) = compile_lexp env ae e1 in
+    prepare_code ^^
+    compile_exp_as env ae sr e2 ^^
+    store_code
+  | LitE l ->
+    compile_lit l
+  | IfE (scrut, e1, e2) ->
+    let code_scrut = compile_exp_as_test env ae scrut in
+    let sr1, code1 = compile_exp_with_hint env ae sr_hint e1 in
+    let sr2, code2 = compile_exp_with_hint env ae sr_hint e2 in
+    (* Use the expected stackrep, if given, else infer from the branches *)
+    let sr = match sr_hint with
+      | Some sr -> sr
+      | None -> StackRep.join sr1 sr2
+    in
+    sr,
+    code_scrut ^^
+    FakeMultiVal.if_ env
+      (StackRep.to_block_type env sr)
+      (code1 ^^ StackRep.adjust env sr1 sr)
+      (code2 ^^ StackRep.adjust env sr2 sr)
+  | BlockE (decs, exp) ->
+    let captured = Freevars.captured_vars (Freevars.exp exp) in
+    let ae', codeW1 = compile_decs env ae decs captured in
+    let (sr, code2) = compile_exp_with_hint env ae' sr_hint exp in
+    (sr, codeW1 code2)
+  | LabelE (name, _ty, e) ->
+    (* The value here can come from many places -- the expression,
+       or any of the nested returns. Hard to tell which is the best
+       stack representation here.
+       So let’s go with Vanilla. *)
+    SR.Vanilla,
+    E.block_ env (StackRep.to_block_type env SR.Vanilla) (
+      G.with_current_depth (fun depth ->
+        let ae1 = VarEnv.add_label ae name depth in
+        compile_exp_vanilla env ae1 e
+      )
+    )
+  | LoopE e ->
+    SR.Unreachable,
+    let ae' = VarEnv.{ ae with lvl = NotTopLvl } in
+    G.loop0 (compile_exp_unit env ae' e ^^ G.i (Br (nr 0l))
+    )
+    ^^
+   G.i Unreachable
+
+  (* Switch whose single tag test is statically known to succeed
+     (see single_case): skip the tag comparison via known_tag_pat. *)
+  | SwitchE (e, cs) when single_case e cs ->
+    let code1 = compile_exp_vanilla env ae e in
+    let [@warning "-8"] [{it={pat={it=TagP (_, pat');_} as pat; exp}; _}] = cs in
+    let ae1, pat_code = compile_pat_local env ae {pat with it = known_tag_pat pat'} in
+    let sr, rhs_code = compile_exp_with_hint env ae1 sr_hint exp in
+
+    (* Use the expected stackrep, if given, else infer from the branches *)
+    (* (with a single branch, "infer" is just that branch's sr) *)
+    let final_sr = match sr_hint with
+      | Some sr -> sr
+      | None -> sr
+    in
+
+    final_sr,
+    (* Run rest in block to exit from *)
+    FakeMultiVal.block_ env (StackRep.to_block_type env final_sr) (fun branch_code ->
+       orsPatternFailure env (List.map (fun (sr, c) ->
+          c ^^^ CannotFail (StackRep.adjust env sr final_sr ^^ branch_code)
+       ) [sr, CannotFail code1 ^^^ pat_code ^^^ CannotFail rhs_code]) ^^
+       G.i Unreachable (* We should always exit using the branch_code *)
+    )
+
+  | SwitchE (e, cs) ->
+    let code1 = compile_exp_vanilla env ae e in
+    let (set_i, get_i) = new_local env "switch_in" in
+
+    (* compile subexpressions and collect the provided stack reps *)
+    let codes = List.map (fun {it={pat; exp=e}; _} ->
+      let (ae1, pat_code) = compile_pat_local env ae pat in
+      let (sr, rhs_code) = compile_exp_with_hint env ae1 sr_hint e in
+      (sr, CannotFail get_i ^^^ pat_code ^^^ CannotFail rhs_code)
+      ) (simplify_cases e cs) in
+
+    (* Use the expected stackrep, if given, else infer from the branches *)
+    let final_sr = match sr_hint with
+      | Some sr -> sr
+      | None -> StackRep.joins (List.map fst codes)
+    in
+
+    final_sr,
+    (* Run scrut *)
+    code1 ^^ set_i ^^
+    (* Run rest in block to exit from *)
+    FakeMultiVal.block_ env (StackRep.to_block_type env final_sr) (fun branch_code ->
+       orsPatternFailure env (List.map (fun (sr, c) ->
+          c ^^^ CannotFail (StackRep.adjust env sr final_sr ^^ branch_code)
+       ) codes) ^^
+       G.i Unreachable (* We should always exit using the branch_code *)
+    )
+  (* Async-wait lowering support features *)
+  | DeclareE (name, typ, e) ->
+    (* Declare a variable ahead of its definition (DefineE): allocate a
+       MutBox on the heap and bind its location in the environment. *)
+    let ae1, i = VarEnv.add_local_with_heap_ind env ae name typ in
+    let sr, code = compile_exp env ae1 e in
+    sr,
+    MutBox.alloc env ^^ G.i (LocalSet (nr i)) ^^
+    code
+  | DefineE (name, _, e) ->
+    SR.unit,
+    let pre_code, sr, code = Var.set_val env ae name in
+    pre_code ^^
+    compile_exp_as env ae sr e ^^
+    code
+  | FuncE (x, sort, control, typ_binds, args, res_tys, e) ->
+    let captured = Freevars.captured exp in
+    let return_tys = match control with
+      | Type.Returns -> res_tys
+      | Type.Replies -> []
+      | Type.Promises -> assert false in
+    let return_arity = List.length return_tys in
+    let mk_body env1 ae1 = compile_exp_as env1 ae1 (StackRep.of_arity return_arity) e in
+    FuncDec.lit env ae x sort control captured args mk_body return_tys exp.at
+  | SelfCallE (ts, exp_f, exp_k, exp_r, exp_c) ->
+    SR.unit,
+    (* locals for the future and the continuations k (reply), r (reject),
+       c (cleanup) before issuing the self-call *)
+    let (set_future, get_future) = new_local env "future" in
+    let (set_k, get_k) = new_local env "k" in
+    let (set_r, get_r) = new_local env "r" in
+    let (set_c, get_c) = new_local env "c" in
+    let mk_body env1 ae1 = compile_exp_as env1 ae1 SR.unit exp_f in
+    let captured = Freevars.captured exp_f in
+    let add_cycles = Internals.add_cycles env ae in
+    FuncDec.async_body env ae ts captured mk_body exp.at ^^
+    Tagged.load_forwarding_pointer env ^^
+    set_future ^^
+
+    compile_exp_vanilla env ae exp_k ^^ set_k ^^
+    compile_exp_vanilla env ae exp_r ^^ set_r ^^
+    compile_exp_vanilla env ae exp_c ^^ set_c ^^
+
+    FuncDec.ic_self_call env ts
+      IC.(get_self_reference env ^^
+          actor_public_field env async_method_name)
+      get_future
+      get_k
+      get_r
+      get_c
+      add_cycles
+  | ActorE (ds, fs, _, _) ->
+    fatal "Local actors not supported by backend"
+  | NewObjE (Type.(Object | Module | Memory) as _sort, fs, _) ->
+    (*
+    We can enable this warning once we treat everything as static that
+    mo_frontend/static.ml accepts, including _all_ literals.
+    if sort = Type.Module then Printf.eprintf "%s" "Warning: Non-static module\n";
+    *)
+    SR.Vanilla,
+    (* mutable fields are passed as their aliased MutBox, immutable ones
+       as their vanilla value *)
+    let fs' = fs |> List.map
+      (fun (f : Ir.field) -> (f.it.name, fun () ->
+        if Type.is_mut f.note
+        then Var.get_aliased_box env ae f.it.var
+        else Var.get_val_vanilla env ae f.it.var)) in
+    Object.lit_raw env fs'
+  | _ -> SR.unit, todo_trap env "compile_exp" (Arrange_ir.exp exp)
+
+(* Compile an expression and discard its result (for effect only). *)
+and compile_exp_ignore env ae e =
+  let sr, code = compile_exp env ae e in
+  code ^^ StackRep.drop env sr
+
+(* Compile to an optional stack representation:
+   None means drop the result; Some sr means adjust to sr.
+   Used by let-pattern compilation (compile_unboxed_pat). *)
+and compile_exp_as_opt env ae sr_out_o e =
+  let sr_in, code = compile_exp_with_hint env ae sr_out_o e in
+  G.with_region e.at (
+    code ^^
+    match sr_out_o with
+    | None -> StackRep.drop env sr_in
+    | Some sr_out -> StackRep.adjust env sr_in sr_out
+  )
+
+(* Convenience: compile to the Vanilla (boxed, uniform) representation. *)
+and compile_exp_vanilla (env : E.t) ae exp =
+  compile_exp_as env ae SR.Vanilla exp
+
+(* Convenience: compile for effect, unit representation. *)
+and compile_exp_unit (env : E.t) ae exp =
+  compile_exp_as env ae SR.unit exp
+
+(* compiles to something that works with IfE or Eqz
+   (SR.UnboxedWord64 or SR.Vanilla are _both_ ok)
+*)
+and compile_exp_as_test env ae e =
+  let sr, code = compile_exp env ae e in
+  code ^^
+  (* NOTE(review): `!=` is OCaml physical inequality; this presumably relies
+     on SR.bool being a single shared value — confirm, else use `<>` *)
+  (if sr != SR.bool then StackRep.adjust env sr SR.Vanilla else G.nop)
+
+(* Compile a prim of type Char -> Char to a RTS call. The codepoint is
+   shifted to the LSB, narrowed to i32 for the RTS, then widened and
+   shifted back after the call. *)
+and compile_char_to_char_rts env ae exp rts_fn =
+  SR.UnboxedWord64 Type.Char,
+  compile_exp_as env ae (SR.UnboxedWord64 Type.Char) exp ^^
+  TaggedSmallWord.lsb_adjust_codepoint env ^^
+  G.i (Convert (Wasm_exts.Values.I32 I32Op.WrapI64)) ^^
+  E.call_import env "rts" rts_fn ^^
+  G.i (Convert (Wasm_exts.Values.I64 I64Op.ExtendUI32)) ^^
+  TaggedSmallWord.msb_adjust_codepoint
+
+
+(* Compile a prim of type Char -> Bool to a RTS call. The RTS function should
+   have type int32_t -> int32_t where the return value is 0 for 'false' and 1
+   for 'true'. *)
+and compile_char_to_bool_rts (env : E.t) (ae : VarEnv.t) exp rts_fn =
+  SR.bool,
+  compile_exp_as env ae (SR.UnboxedWord64 Type.Char) exp ^^
+  TaggedSmallWord.lsb_adjust_codepoint env ^^
+  G.i (Convert (Wasm_exts.Values.I32 I32Op.WrapI64)) ^^
+  (* The RTS function returns Motoko True/False values (which are represented as
+     1 and 0, respectively) so we don't need any marshalling *)
+  E.call_import env "rts" rts_fn ^^
+  Bool.from_rts_int32
+
+(*
+The compilation of declarations (and patterns!) needs to handle mutual recursion.
+This requires conceptually three passes:
+ 1. First we need to collect all names bound in a block,
+    and find locations for them (which extends the environment).
+    The environment is extended monotonically: The type-checker ensures that
+    a Block does not bind the same name twice.
+    We would not need to pass in the environment, just out ... but because
+    it is bundled in the E.t type, threading it through is also easy.
+
+ 2. We need to allocate memory for them, and store the pointer in the
+    WebAssembly local, so that they can be captured by closures.
+
+ 3. We go through the declarations, generate the actual code and fill the
+    allocated memory.
+    This includes creating the actual closure references.
+
+We could do this in separate functions, but I chose to do it in one
+ * it means all code related to one constructor is in one place and
+ * when generating the actual code, we still “know” the id of the local that
+   has the memory location, and don’t have to look it up in the environment.
+
+The first phase works with the `pre_env` passed to `compile_dec`,
+while the third phase is a function that expects the final environment. This
+enables mutual recursion.
+*)
+
+
+(* Compile the comparison of a literal pattern: expects the scrutinee on the
+   stack (in the representation each branch arranges) and leaves a boolean
+   indicating whether the literal matched. *)
+and compile_lit_pat env l =
+  match l with
+  | NullLit ->
+    Opt.is_null env
+  | BoolLit true ->
+    (* the scrutinee bool is itself the match result *)
+    G.nop
+  | BoolLit false ->
+    (* negate the scrutinee bool *)
+    compile_test I64Op.Eqz
+  | (NatLit _ | IntLit _) ->
+    compile_lit_as env SR.Vanilla l ^^
+    BigNum.compile_eq env
+  | Nat8Lit _ ->
+    compile_lit_as env SR.Vanilla l ^^
+    compile_eq env Type.(Prim Nat8)
+  | Nat16Lit _ ->
+    compile_lit_as env SR.Vanilla l ^^
+    compile_eq env Type.(Prim Nat16)
+  | Nat32Lit _ ->
+    compile_lit_as env SR.Vanilla l ^^
+    compile_eq env Type.(Prim Nat32)
+  | Nat64Lit _ ->
+    (* unbox the scrutinee first, then compare unboxed *)
+    BoxedWord64.unbox env Type.Nat64 ^^
+    compile_lit_as env (SR.UnboxedWord64 Type.Nat64) l ^^
+    compile_eq env Type.(Prim Nat64)
+  | Int8Lit _ ->
+    compile_lit_as env SR.Vanilla l ^^
+    compile_eq env Type.(Prim Int8)
+  | Int16Lit _ ->
+    compile_lit_as env SR.Vanilla l ^^
+    compile_eq env Type.(Prim Int16)
+  | Int32Lit _ ->
+    compile_lit_as env SR.Vanilla l ^^
+    compile_eq env Type.(Prim Int32)
+  | Int64Lit _ ->
+    BoxedWord64.unbox env Type.Int64 ^^
+    compile_lit_as env (SR.UnboxedWord64 Type.Int64) l ^^
+    compile_eq env Type.(Prim Int64)
+  | CharLit _ ->
+    compile_lit_as env SR.Vanilla l ^^
+    compile_eq env Type.(Prim Char)
+  | TextLit t ->
+    compile_lit_as env SR.Vanilla l ^^
+    Text.compare env Operator.EqOp
+  | BlobLit t ->
+    compile_lit_as env SR.Vanilla l ^^
+    Blob.compare env (Some Operator.EqOp)
+  | FloatLit _ ->
+    (* float patterns are not supported *)
+    todo_trap env "compile_lit_pat" (Arrange_ir.lit l)
+
+(* Match a pattern against the (vanilla) scrutinee on top of the stack,
+   consuming it and writing any bound variables into their pre-allocated
+   slots (see alloc_pat / alloc_pat_local). Returns a patternCode:
+   CannotFail when the pattern is irrefutable, CanFail otherwise. *)
+and fill_pat env ae pat : patternCode =
+  PatCode.with_region pat.at @@
+  match pat.it with
+  | _ when Ir_utils.is_irrefutable_nonbinding pat -> CannotFail (G.i Drop)
+  | WildP -> assert false (* matched above *)
+  | OptP p when Ir_utils.is_irrefutable_nonbinding p ->
+      (* only the Some/null test is needed; the payload binds nothing *)
+      CanFail (fun fail_code ->
+        Opt.is_some env ^^
+        E.if0 G.nop fail_code)
+  | OptP p ->
+      let (set_x, get_x) = new_local env "opt_scrut" in
+      CanFail (fun fail_code ->
+        set_x ^^
+        get_x ^^
+        Opt.is_some env ^^
+        E.if0
+          ( get_x ^^
+            Opt.project env ^^
+            with_fail fail_code (fill_pat env ae p)
+          )
+          fail_code
+      )
+  | TagP ("", p) -> (* these only come from known_tag_pat *)
+    (* tag test statically known to succeed: project without testing *)
+    if Ir_utils.is_irrefutable_nonbinding p
+    then CannotFail (G.i Drop)
+    else CannotFail (Variant.project env) ^^^ fill_pat env ae p
+  | TagP (l, p) when Ir_utils.is_irrefutable_nonbinding p ->
+      CanFail (fun fail_code ->
+        Variant.test_is env l ^^
+        E.if0 G.nop fail_code)
+  | TagP (l, p) ->
+      let (set_x, get_x) = new_local env "tag_scrut" in
+      CanFail (fun fail_code ->
+        set_x ^^
+        get_x ^^
+        Variant.test_is env l ^^
+        E.if0
+          ( get_x ^^
+            Variant.project env ^^
+            with_fail fail_code (fill_pat env ae p)
+          )
+          fail_code
+      )
+  | LitP l ->
+      CanFail (fun fail_code ->
+        compile_lit_pat env l ^^
+        E.if0 G.nop fail_code)
+  | VarP name ->
+      CannotFail (Var.set_val_vanilla_from_stack env ae name)
+  | TupP ps ->
+      (* load each component by index and match the subpatterns in order *)
+      let (set_i, get_i) = new_local env "tup_scrut" in
+      let rec go i = function
+        | [] -> CannotFail G.nop
+        | p::ps ->
+          let code1 = fill_pat env ae p in
+          let code2 = go (Int64.add i 1L) ps in
+          CannotFail (get_i ^^ Tuple.load_n env i) ^^^ code1 ^^^ code2 in
+      CannotFail set_i ^^^ go 0L ps
+  | ObjP pfs ->
+      (* project each named field and match the subpatterns *)
+      let project = compile_load_field env pat.note in
+      let (set_i, get_i) = new_local env "obj_scrut" in
+      let rec go = function
+        | [] -> CannotFail G.nop
+        | {it={name; pat}; _}::pfs' ->
+          let code1 = fill_pat env ae pat in
+          let code2 = go pfs' in
+          CannotFail (get_i ^^ project name) ^^^ code1 ^^^ code2 in
+      CannotFail set_i ^^^ go pfs
+  | AltP (p1, p2) ->
+      (* try p1; on failure retry the saved scrutinee against p2 *)
+      let code1 = fill_pat env ae p1 in
+      let code2 = fill_pat env ae p2 in
+      let (set_i, get_i) = new_local env "alt_scrut" in
+      CannotFail set_i ^^^
+      orElse (CannotFail get_i ^^^ code1)
+             (CannotFail get_i ^^^ code2)
+
+(* Allocate a direct (vanilla) local for every variable bound by the
+   pattern; returns the extended environment. *)
+and alloc_pat_local env ae pat =
+  let d = Freevars.pat pat in
+  AllocHow.M.fold (fun v typ ae ->
+    let (ae1, _i) = VarEnv.add_direct_local env ae v SR.Vanilla typ
+    in ae1
+  ) d ae
+
+(* Allocate storage for every variable bound by the pattern, using the
+   allocation strategy `how`; returns the extended environment and the
+   allocation code. *)
+and alloc_pat env ae how pat : VarEnv.t * G.t  =
+  (fun (ae, code) -> (ae, G.with_region pat.at code)) @@
+  let d = Freevars.pat pat in
+  AllocHow.M.fold (fun v typ (ae, code0) ->
+    let ae1, code1 = AllocHow.add_local env ae how v typ
+    in (ae1, code0 ^^ code1)
+  ) d (ae, G.nop)
+
+and compile_pat_local env ae pat : VarEnv.t * patternCode =
+  (* It returns:
+     - the extended environment
+     - the patternCode to do the pattern matching.
+       This expects the  undestructed value is on top of the stack,
+       consumes it, and fills the heap.
+       If the pattern matches, execution continues (with nothing on the stack).
+       If the pattern does not match, it fails (in the sense of PatCode.CanFail)
+  *)
+  let ae1 = alloc_pat_local env ae pat in
+  let fill_code = fill_pat env ae1 pat in
+  (ae1, fill_code)
+
+(* Used for let patterns:
+   If the pattern can consume its scrutinee in a better form than vanilla (e.g.
+   unboxed tuple, unboxed 32/64), lets do that.
+*)
+and compile_unboxed_pat env ae how pat
+  : VarEnv.t * G.t * G.t * SR.t option * G.t =
+  (* It returns:
+     - the extended environment
+     - the code to allocate memory
+     - the code to prepare the stack (e.g. push destination addresses)
+       before the scrutinee is pushed
+     - the desired stack rep. None means: Do not even push the scrutinee.
+     - the code to do the pattern matching.
+       This expects the undestructed value is on top of the stack,
+       consumes it, and fills the heap
+       If the pattern does not match, it traps with pattern failure
+  *)
+  let (ae1, alloc_code) = alloc_pat env ae how pat in
+  let pre_code, sr, fill_code = match pat.it with
+    (* Nothing to match: Do not even put something on the stack *)
+    | WildP -> G.nop, None, G.nop
+    (* Tuple patterns *)
+    | TupP ps when List.length ps <> 1 ->
+      G.nop,
+      Some (SR.UnboxedTuple (List.length ps)),
+      (* We have to fill the pattern in reverse order, to take things off the
+         stack. This is only ok as long as patterns have no side effects.
+      *)
+      G.concat_mapi (fun i p -> orPatternFailure env (fill_pat env ae1 p)) (List.rev ps)
+    (* Variable patterns *)
+    | VarP name ->
+      let pre_code, sr, code = Var.set_val env ae1 name in
+      pre_code, Some sr, code
+    (* The general case: Create a single value, match that. *)
+    | _ ->
+      G.nop,
+      Some SR.Vanilla,
+      orPatternFailure env (fill_pat env ae1 pat) in
+  let pre_code = G.with_region pat.at pre_code in
+  let fill_code = G.with_region pat.at fill_code in
+  (ae1, alloc_code, pre_code, sr, fill_code)
+
+(* Compile a single declaration. Returns:
+   - the (pre-)extended environment (names bound, locations chosen)
+   - the allocation code
+   - a function from the final environment to a scope_wrap that runs the
+     definition code and wraps the body (three-phase scheme, see the
+     comment block above fill_pat/compile_decs). *)
+and compile_dec env pre_ae how v2en dec : VarEnv.t * G.t * (VarEnv.t -> scope_wrap) =
+  (fun (pre_ae, alloc_code, mk_code, wrap) ->
+       G.(pre_ae, with_region dec.at alloc_code, fun ae body_code ->
+          with_region dec.at (mk_code ae) ^^ wrap body_code)) @@
+
+  match dec.it with
+  (* A special case for public methods *)
+  (* This relies on the fact that in the top-level mutually recursive group, no shadowing happens. *)
+  | LetD ({it = VarP v; _}, e) when E.NameEnv.mem v v2en ->
+    let (const, fill) = compile_const_exp env pre_ae e in
+    let fi = match const with
+      | Const.Message fi -> fi
+      | _ -> assert false in
+    let pre_ae1 = VarEnv.add_local_public_method pre_ae v (fi, (E.NameEnv.find v v2en)) e.note.Note.typ in
+    G.( pre_ae1, nop, (fun ae -> fill env ae; nop), unmodified)
+
+  (* A special case for constant expressions *)
+  | LetD (p, e) when e.note.Note.const ->
+    (* constant expression matching with patterns is fully decidable *)
+    if const_exp_matches_pat env pre_ae p e then (* not refuted *)
+      let extend, fill = compile_const_dec env pre_ae dec in
+      G.(extend pre_ae, nop, (fun ae -> fill env ae; nop), unmodified)
+    else (* refuted *)
+      (pre_ae, G.nop, (fun _ -> PatCode.patternFailTrap env), unmodified)
+
+  | LetD (p, e) ->
+    let (pre_ae1, alloc_code, pre_code, sr, fill_code) = compile_unboxed_pat env pre_ae how p in
+    ( pre_ae1, alloc_code,
+      (fun ae -> pre_code ^^ compile_exp_as_opt env ae sr e ^^ fill_code),
+      unmodified
+    )
+
+  | VarD (name, content_typ, e) ->
+    (* mutable variables must have a mutable allocation strategy *)
+    assert AllocHow.(match M.find_opt name how with
+                     | Some (LocalMut _ | StoreHeap | StoreStatic) -> true
+                     | _ -> false);
+    let var_typ = Type.Mut content_typ in
+    let pre_ae1, alloc_code = AllocHow.add_local env pre_ae how name var_typ in
+    ( pre_ae1,
+      alloc_code,
+      (fun ae -> let pre_code, sr, code = Var.set_val env ae name in
+                 pre_code ^^ compile_exp_as env ae sr e ^^ code),
+      unmodified
+    )
+
+  (* Aliasing a mutable object field (from the await lowering) *)
+  | RefD (name, typ, { it = DotLE (e, n); _ }) ->
+    let pre_ae1, alloc_code = AllocHow.add_local_for_alias env pre_ae how name typ in
+
+    ( pre_ae1,
+      alloc_code,
+      (fun ae ->
+        compile_exp_vanilla env ae e ^^
+        Object.load_idx_raw env n ^^
+        Var.capture_aliased_box env ae name),
+      unmodified
+    )
+  | RefD _ -> assert false
+
+(* Compile a declaration list, threading the environment through the
+   three phases (see compile_dec). v2en maps variables to their exported
+   public-method names; captured_in_body informs allocation decisions. *)
+and compile_decs_public env pre_ae decs v2en captured_in_body : VarEnv.t * scope_wrap =
+  let how = AllocHow.decs pre_ae decs captured_in_body in
+  let rec go pre_ae = function
+    | []        -> (pre_ae, G.nop, fun _ -> unmodified)
+    | [dec]     -> compile_dec env pre_ae how v2en dec
+    | dec::decs ->
+        let (pre_ae1, alloc_code1, mk_codeW1) = compile_dec env pre_ae how v2en dec in
+        let (pre_ae2, alloc_code2, mk_codeW2) = go              pre_ae1 decs in
+        ( pre_ae2,
+          alloc_code1 ^^ alloc_code2,
+          fun ae -> let codeW1 = mk_codeW1 ae in
+                    let codeW2 = mk_codeW2 ae in
+                    fun body_code -> codeW1 (codeW2 body_code)
+        ) in
+  let (ae1, alloc_code, mk_codeW) = go pre_ae decs in
+  (ae1, fun body_code -> alloc_code ^^ mk_codeW ae1 body_code)
+
+(* Like compile_decs_public, but with no public methods. *)
+and compile_decs env ae decs captured_in_body : VarEnv.t * scope_wrap =
+  compile_decs_public env ae decs E.NameEnv.empty captured_in_body
+
+(* This compiles expressions determined to be const as per the analysis in
+   ir_passes/const.ml. See there for more details.
+   Returns the constant value and a `fill` action that compiles any
+   function bodies once the final environment is known. *)
+and compile_const_exp env pre_ae exp : Const.v * (E.t -> VarEnv.t -> unit) =
+  match exp.it with
+  | FuncE (name, sort, control, typ_binds, args, res_tys, e) ->
+    let fun_rhs =
+
+      (* a few prims cannot be safely inlined *)
+      let inlineable_prim = function
+      | RetPrim -> false
+      | BreakPrim _ -> false
+      | ThrowPrim -> fatal "internal error: left-over ThrowPrim"
+      | _ -> true in
+
+      match sort, control, typ_binds, e.it with
+      (* Special cases for prim-wrapping functions *)
+
+      | Type.Local, Type.Returns, [], PrimE (prim, prim_args) when
+          inlineable_prim prim &&
+          List.length args = List.length prim_args &&
+          List.for_all2 (fun p a -> a.it = VarE (Const, p.it)) args prim_args ->
+        Const.PrimWrapper prim
+      | _, _, _, _ -> Const.Complicated
+    in
+    let return_tys = match control with
+      | Type.Returns -> res_tys
+      | Type.Replies -> []
+      | Type.Promises -> assert false in
+    let mk_body env ae =
+      (* sanity: every capture of a const function must itself be in the
+         static environment *)
+      List.iter (fun v ->
+        if not (VarEnv.NameEnv.mem v ae.VarEnv.vars)
+        then fatal "internal error: const \"%s\": captures \"%s\", not found in static environment\n" name v
+      ) (Freevars.M.keys (Freevars.exp e));
+      compile_exp_as env ae (StackRep.of_arity (List.length return_tys)) e in
+    FuncDec.closed env sort control name args mk_body fun_rhs return_tys exp.at
+  | BlockE (decs, e) ->
+    let (extend, fill1) = compile_const_decs env pre_ae decs in
+    let ae' = extend pre_ae in
+    let (c, fill2) = compile_const_exp env ae' e in
+    (c, fun env ae ->
+      let ae' = extend ae in
+      fill1 env ae';
+      fill2 env ae')
+  | VarE (_, v) ->
+    let c =
+      match VarEnv.lookup_var pre_ae v with
+      | Some (VarEnv.Const c) -> c
+      | _ -> fatal "compile_const_exp/VarE: \"%s\" not found" v
+    in
+    (c, fun _ _ -> ())
+  | NewObjE (Type.(Object | Module | Memory), fs, _) ->
+    let static_fs = List.map (fun f ->
+          let st =
+            match VarEnv.lookup_var pre_ae f.it.var with
+            | Some (VarEnv.Const c) -> c
+            | _ -> fatal "compile_const_exp/ObjE: \"%s\" not found" f.it.var
+          in f.it.name, st) fs
+    in
+    (Const.Obj static_fs), fun _ _ -> ()
+  | PrimE (DotPrim name, [e]) ->
+    (* constant field projection: look the member up statically *)
+    let (object_ct, fill) = compile_const_exp env pre_ae e in
+    let fs = match object_ct with
+      | Const.Obj fs -> fs
+      | _ -> fatal "compile_const_exp/DotE: not a static object" in
+    let member_ct = List.assoc name fs in
+    (member_ct, fill)
+  | PrimE (ProjPrim i, [e]) ->
+    let (object_ct, fill) = compile_const_exp env pre_ae e in
+    let cs = match object_ct with
+      | Const.Tuple cs -> cs
+      | _ -> fatal "compile_const_exp/ProjE: not a static tuple" in
+    (List.nth cs i, fill)
+  | LitE l -> Const.(Lit (const_lit_of_lit l)), (fun _ _ -> ())
+  | PrimE (TupPrim, []) -> Const.Unit, (fun _ _ -> ())
+  | PrimE (ArrayPrim (Const, _), es) ->
+    let (cs, fills) = List.split (List.map (compile_const_exp env pre_ae) es) in
+    (Const.Array cs),
+    (fun env ae -> List.iter (fun fill -> fill env ae) fills)
+  | PrimE (TupPrim, es) ->
+    let (cs, fills) = List.split (List.map (compile_const_exp env pre_ae) es) in
+    (Const.Tuple cs),
+    (fun env ae -> List.iter (fun fill -> fill env ae) fills)
+  | PrimE (TagPrim i, [e]) ->
+    let (arg_ct, fill) = compile_const_exp env pre_ae e in
+    (Const.Tag (i, arg_ct)),
+    fill
+  | PrimE (OptPrim, [e]) ->
+    let (arg_ct, fill) = compile_const_exp env pre_ae e in
+    (Const.Opt arg_ct),
+    fill
+
+  | _ -> assert false
+
+(* Compile a list of constant declarations; composes the environment
+   extenders and fill actions of the individual declarations. *)
+and compile_const_decs env pre_ae decs : (VarEnv.t -> VarEnv.t) * (E.t -> VarEnv.t -> unit) =
+  let rec go pre_ae = function
+    | []          -> (fun ae -> ae), (fun _ _ -> ())
+    | [dec]       -> compile_const_dec env pre_ae dec
+    | (dec::decs) ->
+        let (extend1, fill1) = compile_const_dec env pre_ae dec in
+        let pre_ae1 = extend1 pre_ae in
+        let (extend2, fill2) = go                    pre_ae1 decs in
+        (fun ae -> extend2 (extend1 ae)),
+        (fun env ae -> fill1 env ae; fill2 env ae) in
+  go pre_ae decs
+
+(* Decide statically whether a constant expression matches a pattern
+   (constant pattern matching is fully decidable). *)
+and const_exp_matches_pat env ae pat exp : bool =
+  assert exp.note.Note.const;
+  let c, _ = compile_const_exp env ae exp in
+  match destruct_const_pat VarEnv.empty_ae pat c with Some _ -> true | _ -> false
+
+(* Statically match a constant value against a pattern.
+   Returns Some of the environment extended with the pattern's bindings
+   (as constants) when the match succeeds, None when it is refuted.
+   Asserts on shape mismatches that the type checker rules out. *)
+and destruct_const_pat ae pat const : VarEnv.t option = match pat.it with
+  | WildP -> Some ae
+  | VarP v -> Some (VarEnv.add_local_const ae v const pat.note)
+  | ObjP pfs ->
+    let fs = match const with Const.Obj fs -> fs | _ -> assert false in
+    List.fold_left (fun ae (pf : pat_field) ->
+      match ae, List.find_opt (fun (n, _) -> pf.it.name = n) fs with
+      | None, _ -> None
+      | Some ae, Some (_, c) -> destruct_const_pat ae pf.it.pat c
+      | _, None -> assert false
+    ) (Some ae) pfs
+  | AltP (p1, p2) ->
+    (* first alternative wins; fall back to the second on refutation *)
+    let l = destruct_const_pat ae p1 const in
+    if l = None then destruct_const_pat ae p2 const
+    else l
+  | TupP ps ->
+    let cs = match const with Const.Tuple cs -> cs | Const.Unit -> [] | _ -> assert false in
+    let go ae p c = match ae with
+      | Some ae -> destruct_const_pat ae p c
+      | _ -> None in
+    List.fold_left2 go (Some ae) ps cs
+  | LitP lp ->
+    begin match const with
+    | Const.Lit lc when Const.lit_eq (const_lit_of_lit lp) lc -> Some ae
+    | _ -> None
+    end
+  | OptP p ->
+    begin match const with
+      | Const.Opt c -> destruct_const_pat ae p c
+      | Const.(Lit Null) -> None
+      | _ -> assert false
+    end
+  | TagP (i, p) ->
+     match const with
+     | Const.Tag (ic, c) when i = ic -> destruct_const_pat ae p c
+     | Const.Tag _ -> None
+     | _ -> assert false
+
+and compile_const_dec env pre_ae dec : (VarEnv.t -> VarEnv.t) * (E.t -> VarEnv.t -> unit) =
+  (* This returns a _function_ to extend the VarEnv, instead of doing it, because
+  it needs to be extended twice: Once during the pass that gets the outer, static values
+  (no forward references), and then to implement the `fill`, which compiles the bodies
+  of functions (may contain forward references.) *)
+  match dec.it with
+  (* This should only contain constants (cf. is_const_exp) *)
+  | LetD (p, e) ->
+    let (const, fill) = compile_const_exp env pre_ae e in
+    (fun ae -> match destruct_const_pat ae p const with Some ae -> ae | _ -> assert false),
+    (fun env ae -> fill env ae)
+  | VarD _ | RefD _ -> fatal "compile_const_dec: Unexpected VarD/RefD"
+
+(* Define the built-in "init" function for a compilation unit.
+   Preconditions (asserted): all relevant IR lowering passes have run,
+   so the flavor no longer carries typ fields, polymorphic equality,
+   show, await, or async types.
+   Libraries cannot be compiled as programs; actors are delegated to
+   main_actor. *)
+and compile_init_func mod_env ((cu, flavor) : Ir.prog) =
+  assert (not flavor.has_typ_field);
+  assert (not flavor.has_poly_eq);
+  assert (not flavor.has_show);
+  assert (not flavor.has_await);
+  assert (not flavor.has_async_typ);
+  match cu with
+  (* fixed: error message previously named the wrong function
+     ("compile_start_func") *)
+  | LibU _ -> fatal "compile_init_func: Cannot compile library"
+  | ProgU ds ->
+    Func.define_built_in mod_env "init" [] [] (fun env ->
+      let _ae, codeW = compile_decs env VarEnv.empty_ae ds Freevars.S.empty in
+      codeW G.nop
+    )
+  | ActorU (as_opt, ds, fs, up, _t) ->
+    main_actor as_opt mod_env ds fs up
+
+(* Export one public actor field as an IC canister method
+   (canister_update / canister_query / canister_composite_query,
+   depending on the function's shared sort). *)
+and export_actor_field env  ae (f : Ir.field) =
+  (* A public actor field is guaranteed to be compiled as a PublicMethod *)
+  let fi =
+    match VarEnv.lookup_var ae f.it.var with
+    | Some (VarEnv.PublicMethod (fi, _)) -> fi
+    | _ -> assert false in
+
+  E.add_export env (nr {
+    name = Lib.Utf8.decode (match E.mode env with
+      | Flags.ICMode | Flags.RefMode ->
+        Mo_types.Type.(
+        match normalize f.note with
+        |  Func(Shared sort,_,_,_,_) ->
+           (match sort with
+            | Write -> "canister_update " ^ f.it.name
+            | Query -> "canister_query " ^ f.it.name
+            | Composite -> "canister_composite_query " ^ f.it.name
+           )
+        | _ -> assert false)
+      | _ -> assert false);
+    edesc = nr (FuncExport (nr fi))
+  })
+
+(* Main actor *)
+and main_actor as_opt mod_env ds fs up =
+  let stable_actor_type = up.stable_type in
+  let build_stable_actor = up.stable_record in
+  IncrementalGraphStabilization.define_methods mod_env stable_actor_type;
+
+  (* Export metadata *)
+  mod_env.E.stable_types := metadata "motoko:stable-types" up.meta.sig_;
+  mod_env.E.service := metadata "candid:service" up.meta.candid.service;
+  mod_env.E.args := metadata "candid:args" up.meta.candid.args;
+
+  Func.define_built_in mod_env IC.initialize_main_actor_function_name [] [] (fun env ->
+    let ae0 = VarEnv.empty_ae in
+    let captured = Freevars.captured_vars (Freevars.actor ds fs up) in
+    (* Add any params to the environment *)
+    (* Captured ones need to go into static memory, the rest into locals *)
+    let args = match as_opt with None -> [] | Some as_ -> as_ in
+    let arg_list = List.map (fun a -> (a.it, a.note)) args in
+    let arg_names = List.map (fun a -> a.it) args in
+    let arg_tys = List.map (fun a -> a.note) args in
+    let as_local n = not (Freevars.S.mem n captured) in
+    let ae1 = VarEnv.add_arguments env ae0 as_local arg_list in
+
+    (* Reverse the fs, to a map from variable to exported name *)
+    let v2en = E.NameEnv.from_list (List.map (fun f -> (f.it.var, f.it.name)) fs) in
+
+    (* Compile the declarations *)
+    let ae2, decls_codeW = compile_decs_public env ae1 ds v2en
+      Freevars.(captured_vars (system up))
+    in
+
+    (* Export the public functions *)
+    List.iter (export_actor_field env ae2) fs;
+
+    (* Export upgrade hooks *)
+    Func.define_built_in env "pre_exp" [] [] (fun env ->
+      compile_exp_as env ae2 SR.unit up.preupgrade);
+    Func.define_built_in env "post_exp" [] [] (fun env ->
+      compile_exp_as env ae2 SR.unit up.postupgrade);
+    IC.export_upgrade_methods env;
+
+    (* Export heartbeat (but only when required) *)
+    begin match up.heartbeat.it with
+     | Ir.PrimE (Ir.TupPrim, []) -> ()
+     | _ ->
+       Func.define_built_in env "heartbeat_exp" [] [] (fun env ->
+         compile_exp_as env ae2 SR.unit up.heartbeat);
+       IC.export_heartbeat env;
+    end;
+
+    (* Export timer (but only when required) *)
+    begin match up.timer.it with
+     | Ir.PrimE (Ir.TupPrim, []) -> ()
+     | _ ->
+       Func.define_built_in env "timer_exp" [] [] (fun env ->
+         compile_exp_as env ae2 SR.unit up.timer);
+       IC.export_timer env;
+    end;
+
+    (* Export inspect (but only when required) *)
+    begin match up.inspect.it with
+     | Ir.PrimE (Ir.TupPrim, []) -> ()
+     | _ ->
+       Func.define_built_in env "inspect_exp" [] [] (fun env ->
+         compile_exp_as env ae2 SR.unit up.inspect);
+       IC.export_inspect env;
+    end;
+
+    (* Helper function to build the stable actor wrapper *)
+    Func.define_built_in mod_env IC.get_actor_to_persist_function_name [] [I64Type] (fun env ->
+      compile_exp_as env ae2 SR.Vanilla build_stable_actor
+    );
+
+    (* Deserialize the init arguments *)
+    begin match as_opt with
+      | None
+      | Some [] ->
+        (* Liberally accept empty as well as unit argument *)
+        assert (arg_tys = []);
+        Persistence.get_init_message_payload env ^^
+        Blob.len env ^^
+        compile_eq_const 0L ^^
+        E.if0
+          G.nop
+          begin
+            (* Only validate the message payload. *)
+            Persistence.get_init_message_payload env ^^
+            Bool.lit false ^^ (* cannot recover *)
+            Serialization.deserialize_from_blob false env arg_tys
+          end
+      | Some (_ :: _) ->
+        Persistence.get_init_message_payload env ^^
+        Bool.lit false ^^ (* cannot recover *)
+        Serialization.deserialize_from_blob false env arg_tys ^^
+        G.concat_map (Var.set_val_vanilla_from_stack env ae1) (List.rev arg_names)
+    end ^^
+    begin
+      if up.timer.at <> no_region then
+        (* initiate a timer pulse *)
+        compile_unboxed_const 1L ^^
+        IC.system_call env "global_timer_set" ^^
+        G.i Drop
+      else
+        G.nop
+    end ^^
+
+    decls_codeW G.nop
+  );
+
+  Func.define_built_in mod_env "init" [] [] (fun env ->
+    IC.init_globals env ^^
+    (* Save the init message payload for later deserialization. *)
+    IC.arg_data env ^^
+    Persistence.set_init_message_payload env ^^
+    Persistence.initialize env stable_actor_type
+  )
+
+and metadata name value =
+  if List.mem name !Flags.omit_metadata_names then None
+  else Some (
+           List.mem name !Flags.public_metadata_names,
+           value)
+
+and conclude_module env set_serialization_globals start_fi_o =
+
+  RTS_Exports.system_exports env;
+
+  FuncDec.export_async_method env;
+  FuncDec.export_gc_trigger_method env;
+  FuncDec.export_stabilization_limits env;
+  
+  (* See Note [Candid subtype checks] *)
+  Serialization.create_global_type_descriptor env set_serialization_globals;
+
+  (* declare before building GC *)
+
+  (* add beginning-of-heap pointer, may be changed by linker *)
+  (* needs to happen here now that we know the size of static memory *)
+  let set_heap_base = E.add_global64_delayed env "__heap_base" Immutable in
+  E.export_global env "__heap_base";
+
+  Heap.register env;
+  IC.register env;
+
+  let dynamic_heap_start = Lifecycle.end_ () in
+  set_heap_base dynamic_heap_start;
+
+  (* Wrap the start function with the RTS initialization *)
+  let rts_start_fi = E.add_fun env "rts_start" (Func.of_body env [] [] (fun env1 ->
+    E.call_import env "rts" ("initialize_incremental_gc") ^^
+    GCRoots.register_static_variables env ^^
+    match start_fi_o with
+    | Some fi ->
+      G.i (Call fi)
+    | None ->
+      Lifecycle.set env Lifecycle.PreInit
+  )) in
+
+  IC.default_exports env;
+
+  let func_imports = E.get_func_imports env in
+  let ni = List.length func_imports in
+  let ni' = Int32.of_int ni in
+
+  let other_imports = E.get_other_imports env in
+
+  let initial_memory_pages = Int64.(add (div dynamic_heap_start page_size) 1L) in
+  let memories = E.get_memories env initial_memory_pages in
+
+  let funcs = E.get_funcs env in
+
+  let datas = List.map (fun (dinit) -> nr {
+    dinit;
+    dmode = (nr Wasm_exts.Ast.Passive);
+    }) (E.get_data_segments env) in
+
+  let elems = List.map (fun (fi, fp) -> nr {
+    index = nr 0l;
+    offset = nr (G.to_instr_list (compile_const_32 fp));
+    init = [ nr fi ];
+    }) (E.get_elems env) in
+
+  let table_sz = E.get_end_of_table env in
+
+  let module_ = {
+      types = List.map nr (E.get_types env);
+      funcs = List.map (fun (f,_,_) -> f) funcs;
+      tables = [ nr { ttype = TableType ({min = table_sz; max = Some table_sz}, FuncRefType) } ];
+      elems;
+      start = Some (nr rts_start_fi);
+      globals = E.get_globals env;
+      memories;
+      imports = func_imports @ other_imports;
+      exports = E.get_exports env;
+      datas
+    } in
+
+  let emodule =
+    let open Wasm_exts.CustomModule in
+    { module_;
+      dylink = None;
+      name = { empty_name_section with function_names =
+                 List.mapi (fun i (f,n,_) -> Int32.(add ni' (of_int i), n)) funcs;
+               locals_names =
+                 List.mapi (fun i (f,_,ln) -> Int32.(add ni' (of_int i), ln)) funcs; };
+      motoko = {
+        labels = E.get_labs env;
+        stable_types = !(env.E.stable_types);
+        compiler = metadata "motoko:compiler" (Lib.Option.get Source_id.release Source_id.id);
+      };
+      enhanced_orthogonal_persistence = Some (false, "64-bit, layout version 1");
+      candid = {
+        args = !(env.E.args);
+        service = !(env.E.service);
+      };
+      source_mapping_url = None;
+      wasm_features = E.get_features env;
+    } in
+
+  match E.get_rts env with
+  | None -> emodule
+  | Some rts -> Linking.LinkModule.link emodule "rts" rts
+
+let compile mode rts (prog : Ir.prog) : Wasm_exts.CustomModule.extended_module =
+  (* Enhanced orthogonal persistence requires a fixed layout. *)
+  assert !Flags.rtti; (* Use precise tagging for graph copy. *)
+  assert (!Flags.gc_strategy = Flags.Incremental); (* Define heap layout with the incremental GC. *)
+  let env = E.mk_global mode rts IC.trap_with in
+
+  IC.register_globals env;
+  Stack.register_globals env;
+  GC.register_globals env;
+  StableMem.register_globals env;
+  Serialization.Registers.register_globals env;
+  Serialization.Registers.define_idl_limit_check env;
+  IncrementalGraphStabilization.register_globals env;
+  Persistence.register_globals env;
+  
+  (* See Note [Candid subtype checks] *)
+  let set_serialization_globals = Serialization.register_delayed_globals env in
+  Serialization.reserve_global_type_descriptor env;
+  
+  IC.system_imports env;
+  RTS.system_imports env;
+
+  compile_init_func env prog;
+  let start_fi_o = match E.mode env with
+    | Flags.ICMode | Flags.RefMode ->
+      IC.export_init env;
+      None
+    | Flags.WASIMode ->
+      IC.export_wasi_start env;
+      None
+    | Flags.WasmMode ->
+      Some (nr (E.built_in env "init"))
+  in
+
+  conclude_module env set_serialization_globals start_fi_o
diff --git a/src/codegen/compile_enhanced.mli b/src/codegen/compile_enhanced.mli
new file mode 100644
index 00000000000..165bd4d8061
--- /dev/null
+++ b/src/codegen/compile_enhanced.mli
@@ -0,0 +1,3 @@
+open Ir_def
+
+val compile : Mo_config.Flags.compile_mode -> Wasm_exts.CustomModule.extended_module option -> Ir.prog -> Wasm_exts.CustomModule.extended_module
diff --git a/src/codegen/die.ml b/src/codegen/die.ml
index 48f1bff083b..d52a2005c88 100644
--- a/src/codegen/die.ml
+++ b/src/codegen/die.ml
@@ -219,10 +219,10 @@ and prim_type_ref (prim : Type.prim) : die list * int =
           (dw_attrs [name; Bit_size 29; Data_bit_offset 8; Encoding dw_ATE_UTF])
       | Type.(Int | Nat) ->
         with_referencable_tags add dw_TAG_base_type
-          (dw_attrs [name; Bit_size 32; Data_bit_offset 0(*FIXME: for now*); Encoding dw_ATE_signed])
+          (dw_attrs [name; Bit_size 64; Data_bit_offset 0(*FIXME: for now*); Encoding dw_ATE_signed])
       | Type.Text -> (* FIXME: should be dynamic, like Any *)
         with_referencable_tags add dw_TAG_base_type
-          (dw_attrs [name; Bit_size 32; Data_bit_offset 0(*FIXME: for now*); Encoding dw_ATE_UTF])
+          (dw_attrs [name; Bit_size 64; Data_bit_offset 0(*FIXME: for now*); Encoding dw_ATE_UTF])
       | Type.(Int8|Int16|Int32) ->
         with_referencable_tags add dw_TAG_base_type
           (dw_attrs [name; Bit_size 32; Data_bit_offset 0(*FIXME: for now*); Encoding dw_ATE_signed])
diff --git a/src/codegen/instrList.ml b/src/codegen/instrList.ml
index fbfd0d35edc..886a103cdd1 100644
--- a/src/codegen/instrList.ml
+++ b/src/codegen/instrList.ml
@@ -9,7 +9,7 @@ features are
 
 open Wasm_exts.Ast
 open Wasm.Source
-open Wasm.Values
+open Wasm_exts.Values
 
 let combine_shifts const op = function
   | I32 opl, ({it = I32 l'; _} as cl), I32 opr, I32 r' when opl = opr ->
@@ -17,6 +17,11 @@ let combine_shifts const op = function
     if (l >= 0 && l < 32 && r >= 0 && r < 32 && l + r < 32) then
       Some [{const with it = Const {cl with it = I32 (Int32.add l' r')}}; {op with it = Binary (I32 opl)}]
     else None
+  | I64 opl, ({it = I64 l'; _} as cl), I64 opr, I64 r' when opl = opr ->
+    let l, r = Int64.(to_int l', to_int r') in
+    if (l >= 0 && l < 64 && r >= 0 && r < 64 && l + r < 64) then
+      Some [{const with it = Const {cl with it = I64 (Int64.add l' r')}}; {op with it = Binary (I64 opl)}]
+    else None
   | _ -> None
 
 (* Some simple peephole optimizations, to make the output code look less stupid *)
@@ -91,6 +96,8 @@ let optimize : instr list -> instr list = fun is ->
     (* LSBit masking before `If` is `Ctz` and switched `If` legs *)
     | ({ it = Binary (I32 I32Op.And); _} as a) :: { it = Const {it = I32 1l; _}; _} :: l', ({it = If (res,then_,else_); _} as i) :: r' ->
       go ({a with it = Unary (I32 I32Op.Ctz)} :: l') ({i with it = If (res,else_,then_)} :: r')
+    | ({ it = Binary (I64 I64Op.And); _} as a) :: { it = Const {it = I64 1L; _}; _} :: l', ({it = If (res,then_,else_); _} as i) :: r' ->
+      go ({a with it = Unary (I64 I64Op.Ctz)} :: l') ({i with it = If (res,else_,then_)} :: r')
     (* `If` blocks after pushed constants are simplifiable *)
     | { it = Const {it = I32 0l; _}; _} :: l', ({it = If (res,_,else_); _} as i) :: r' ->
       go l' ({i with it = Block (res, else_)} :: r')
@@ -114,9 +121,33 @@ let optimize : instr list -> instr list = fun is ->
       ({it = Const cr; _} as const) :: ({it = Binary opr; _} as op) :: r'
         when Option.is_some (combine_shifts const op (opl, cl, opr, cr.it)) ->
       go l' (Option.get (combine_shifts const op (opl, cl, opr, cr.it)) @ r')
+    | {it = Binary (I64 I64Op.(Shl|ShrS|ShrU) as opl); _} :: {it = Const cl; _} :: l',
+      ({it = Const cr; _} as const) :: ({it = Binary opr; _} as op) :: r'
+        when Option.is_some (combine_shifts const op (opl, cl, opr, cr.it)) ->
+      go l' (Option.get (combine_shifts const op (opl, cl, opr, cr.it)) @ r')
     (* Null shifts can be eliminated *)
     | l', {it = Const {it = I32 0l; _}; _} :: {it = Binary (I32 I32Op.(Shl|ShrS|ShrU)); _} :: r' ->
       go l' r'
+    | l', {it = Const {it = I64 0L; _}; _} :: {it = Binary (I64 I64Op.(Shl|ShrS|ShrU)); _} :: r' ->
+      go l' r'
+    (* Widen followed by narrow is pointless - but not the opposite! *)
+    | {it = Convert (I64 I64Op.(ExtendSI32 | ExtendUI32)); _} :: l', {it = Convert (I32 I32Op.WrapI64); _} :: r' -> 
+      go l' r'
+    (* Constant bitwise `and` evaluation *)
+    | l', {it = Const {it = I64 cl; _}; _} :: {it = Const {it = I64 cr; _}; _} :: {it = Binary (I64 I64Op.And); at} :: r' ->
+      let combined = {it = Const {it = I64 (Int64.logand cl cr); at}; at} in
+      go l' (combined :: r')
+    (* Constant bitwise `or` evaluation *)
+    | l', {it = Const {it = I64 cl; _}; _} :: {it = Const {it = I64 cr; _}; _} :: {it = Binary (I64 I64Op.Or); at} :: r' ->
+      let combined = {it = Const {it = I64 (Int64.logor cl cr); at}; at} in
+      go l' (combined :: r')
+    (* Widen followed by I64.Eqz can be simplified to I32.Eqz *)
+    | l', {it = Convert (I64 I64Op.(ExtendSI32 | ExtendUI32)); _} :: {it = Test (I64 I64Op.Eqz); at} :: r' ->
+      go l' ({it = Test (I32 I32Op.Eqz); at} :: r')
+    (* Narrow a constant *)
+    | l', {it = Const {it = I64 c; _}; at} :: {it = Convert (I32 I32Op.WrapI64); _} :: r' ->
+      let narrowed = {it = Const {it = I32 (Int64.to_int32 c); at}; at} in
+      go l' (narrowed :: r')
     (* Look further *)
     | _, i::r' -> go (i::l) r'
     (* Done looking *)
diff --git a/src/exes/candid_tests.ml b/src/exes/candid_tests.ml
index bddd6aae3dd..6c43abda5f9 100644
--- a/src/exes/candid_tests.ml
+++ b/src/exes/candid_tests.ml
@@ -14,7 +14,7 @@ let name = "candid-tests"
 let version = "0.1"
 let banner = "Candid test suite runner " ^ version ^ ""
 let usage = "Usage: " ^ name ^ " [ -i path/to/candid/test ]"
-let _WASMTIME_OPTIONS_ = "--disable-cache --enable-cranelift-nan-canonicalization --wasm-features multi-memory,bulk-memory"
+let _WASMTIME_OPTIONS_ = "--disable-cache --enable-cranelift-nan-canonicalization --wasm-features=memory64,multi-memory,bulk-memory"
 
 (* Argument handling *)
 
diff --git a/src/exes/moc.ml b/src/exes/moc.ml
index 16dca39a53b..9b81a6fcb4b 100644
--- a/src/exes/moc.ml
+++ b/src/exes/moc.ml
@@ -147,6 +147,22 @@ let argspec = [
     Flags.use_stable_regions := true),
       " force eager initialization of stable regions metadata (for testing purposes); consumes between 386KiB or 8MiB of additional physical stable memory, depending on current use of ExperimentalStableMemory library";
 
+  "--generational-gc",
+  Arg.Unit (fun () -> Flags.gc_strategy := Mo_config.Flags.Generational),
+  " use generational GC (only available with classical persistence)";
+
+  "--incremental-gc",
+  Arg.Unit (fun () -> Flags.gc_strategy := Mo_config.Flags.Incremental),
+  " use incremental GC (default with enhanced orthogonal persistence)";
+
+  "--compacting-gc",
+  Arg.Unit (fun () -> Flags.gc_strategy := Mo_config.Flags.MarkCompact),
+  " use compacting GC (only available with classical persistence)";
+
+  "--copying-gc",
+  Arg.Unit (fun () -> Flags.gc_strategy := Mo_config.Flags.Copying),
+  " use copying GC (default and only available with classical persistence)";
+
   "--force-gc",
   Arg.Unit (fun () -> Flags.force_gc := true),
   " disable GC scheduling, always do GC after an update message (for testing)";
@@ -163,10 +179,37 @@ let argspec = [
   Arg.Unit (fun () -> Flags.experimental_field_aliasing := true),
   " enable experimental support for aliasing of var fields";
 
+  "--experimental-rtti",
+  Arg.Unit (fun () -> Flags.rtti := true),
+  " enable experimental support for precise runtime type information (default with enhanced orthogonal persistence)";
+
+  "--rts-stack-pages",
+  Arg.Int (fun pages -> Flags.rts_stack_pages := Some pages),
+  "  set maximum number of pages available for runtime system stack (default " ^ (Int.to_string Flags.rts_stack_pages_default) ^ ", only available with classical persistence)";
+
   "--trap-on-call-error",
   Arg.Unit (fun () -> Flags.trap_on_call_error := true),
   " Trap, don't throw an `Error`, when an IC call fails due to destination queue full or freezing threshold is crossed. Emulates behaviour of moc versions < 0.8.0.";
 
+  (* persistence *)
+  "--enhanced-orthogonal-persistence",
+  Arg.Unit (fun () -> Flags.enhanced_orthogonal_persistence := true),
+  " Use enhanced orthogonal persistence (experimental): Scalable and fast upgrades using a persistent 64-bit main memory.";
+
+  "--stabilization-instruction-limit",
+  Arg.Int (fun limit -> Flags.(stabilization_instruction_limit := {
+    upgrade = limit; 
+    update_call = limit;
+  })),
+  "  set instruction limit for incremental graph-copy-based stabilization and destabilization (for testing)";
+
+  "--stable-memory-access-limit",
+  Arg.Int (fun limit -> Flags.(stable_memory_access_limit := {
+    upgrade = limit; 
+    update_call = limit;
+  })),
+  "  set stable memory access limit for incremental graph-copy-based stabilization and destabilization (for testing)";
+
   (* optimizations *)
   "-fno-shared-code",
   Arg.Unit (fun () -> Flags.share_code := false),
diff --git a/src/ir_def/arrange_ir.ml b/src/ir_def/arrange_ir.ml
index 54a6056b981..d476aa83ec7 100644
--- a/src/ir_def/arrange_ir.ml
+++ b/src/ir_def/arrange_ir.ml
@@ -35,13 +35,15 @@ let rec exp e = match e.it with
   | TryE (e, cs, None) -> "TryE" $$ [exp e] @ List.map case cs
   | TryE (e, cs, Some (i, _)) -> "TryE" $$ [exp e] @ List.map case cs @ Atom ";" :: [id i]
 
-and system { meta; preupgrade; postupgrade; heartbeat; timer; inspect} = (* TODO: show meta? *)
+and system { meta; preupgrade; postupgrade; heartbeat; timer; inspect; stable_record; stable_type} = (* TODO: show meta? *)
   "System" $$ [
       "Pre" $$ [exp preupgrade];
       "Post" $$ [exp postupgrade];
       "Heartbeat" $$ [exp heartbeat];
       "Timer" $$ [exp timer];
       "Inspect" $$ [exp inspect];
+      "StableRecord" $$ [exp stable_record];
+      "StableType" $$ [typ stable_type]
     ]
 
 and lexp le = match le.it with
diff --git a/src/ir_def/check_ir.ml b/src/ir_def/check_ir.ml
index 121a60ca3c4..a70c10f47d1 100644
--- a/src/ir_def/check_ir.ml
+++ b/src/ir_def/check_ir.ml
@@ -631,10 +631,9 @@ let rec check_exp env (exp:Ir.exp) : unit =
       check_typ env t1;
       check (store_typ t1) "Invalid type argument to ICStableRead";
       t1 <: t
-    | ICStableWrite t1, [exp1] ->
+    | ICStableWrite t1, [] ->
       check_typ env t1;
       check (store_typ t1) "Invalid type argument to ICStableWrite";
-      typ exp1 <: t1;
       T.unit <: t
     | NumConvWrapPrim (p1, p2), [e] ->
       (* we should check if this conversion is supported *)
@@ -816,7 +815,7 @@ let rec check_exp env (exp:Ir.exp) : unit =
     typ exp_r <: T.(Construct.err_contT unit);
     typ exp_c <: Construct.clean_contT;
   | ActorE (ds, fs,
-      { preupgrade; postupgrade; meta; heartbeat; timer; inspect }, t0) ->
+      { preupgrade; postupgrade; meta; heartbeat; timer; inspect; stable_record; stable_type }, t0) ->
     (* TODO: check meta *)
     let env' = { env with async = None } in
     let scope1 = gather_block_decs env' ds in
@@ -827,11 +826,13 @@ let rec check_exp env (exp:Ir.exp) : unit =
     check_exp env'' heartbeat;
     check_exp env'' timer;
     check_exp env'' inspect;
+    check_exp env'' stable_record;
     typ preupgrade <: T.unit;
     typ postupgrade <: T.unit;
     typ heartbeat <: T.unit;
     typ timer <: T.unit;
     typ inspect <: T.unit;
+    typ stable_record <: stable_type;
     check (T.is_obj t0) "bad annotation (object type expected)";
     let (s0, tfs0) = T.as_obj t0 in
     let val_tfs0 = List.filter (fun tf -> not (T.is_typ tf.T.typ)) tfs0 in
@@ -1157,7 +1158,7 @@ let check_comp_unit env = function
     let env' = adjoin env scope in
     check_decs env' ds
   | ActorU (as_opt, ds, fs,
-      { preupgrade; postupgrade; meta; heartbeat; timer; inspect }, t0) ->
+      { preupgrade; postupgrade; meta; heartbeat; timer; inspect; stable_type; stable_record }, t0) ->
     let check p = check env no_region p in
     let (<:) t1 t2 = check_sub env no_region t1 t2 in
     let env' = match as_opt with
@@ -1175,11 +1176,13 @@ let check_comp_unit env = function
     check_exp env'' heartbeat;
     check_exp env'' timer;
     check_exp env'' inspect;
+    check_exp env'' stable_record;
     typ preupgrade <: T.unit;
     typ postupgrade <: T.unit;
     typ heartbeat <: T.unit;
     typ timer <: T.unit;
     typ inspect <: T.unit;
+    typ stable_record <: stable_type;
     check (T.is_obj t0) "bad annotation (object type expected)";
     let (s0, tfs0) = T.as_obj t0 in
     let val_tfs0 = List.filter (fun tf -> not (T.is_typ tf.T.typ)) tfs0 in
diff --git a/src/ir_def/construct.ml b/src/ir_def/construct.ml
index 2a92c9ca3ea..5de2e5ef0a7 100644
--- a/src/ir_def/construct.ml
+++ b/src/ir_def/construct.ml
@@ -90,10 +90,10 @@ let primE prim es =
     | ICReplyPrim _
     | ICRejectPrim -> T.Non
     | ICCallerPrim -> T.caller
+    | ICStableWrite _ -> T.unit
     | ICStableRead t -> t
     | ICMethodNamePrim -> T.text
     | ICPerformGC
-    | ICStableWrite _ -> T.unit
     | ICStableSize _ -> T.nat64
     | IdxPrim
     | DerefArrayOffset -> T.(as_immut (as_array_sub (List.hd es).note.Note.typ))
diff --git a/src/ir_def/freevars.ml b/src/ir_def/freevars.ml
index bb1fbaa41f8..ae8d8309cf5 100644
--- a/src/ir_def/freevars.ml
+++ b/src/ir_def/freevars.ml
@@ -123,12 +123,13 @@ let rec exp e : f = match e.it with
 
 and actor ds fs u = close (decs ds +++ fields fs +++ system u)
 
-and system {meta; preupgrade; postupgrade; heartbeat; timer; inspect} =
+and system {meta; preupgrade; postupgrade; heartbeat; timer; inspect; stable_record; _} =
   under_lambda (exp preupgrade) ++
   under_lambda (exp postupgrade) ++
   under_lambda (exp heartbeat) ++
   under_lambda (exp timer) ++
-  under_lambda (exp inspect)
+  under_lambda (exp inspect) ++
+  under_lambda (exp stable_record)
 
 and exps es : f = unions exp es
 
diff --git a/src/ir_def/ir.ml b/src/ir_def/ir.ml
index 58419ef92ad..413b56506dd 100644
--- a/src/ir_def/ir.ml
+++ b/src/ir_def/ir.ml
@@ -56,6 +56,7 @@ type arg = (string, Type.typ) Source.annotated_phrase
 (* Expressions *)
 
 type exp = exp' phrase
+
 and exp' =
   | PrimE of (prim * exp list)                 (* primitive *)
   | VarE of mut * id                           (* variable *)
@@ -83,7 +84,9 @@ and system = {
   postupgrade : exp;
   heartbeat : exp;
   timer : exp; (* TODO: use an option type: (Default of exp | UserDefined of exp) option *)
-  inspect : exp
+  inspect : exp;
+  stable_record: exp;
+  stable_type: Type.typ;
 }
 
 and candid = {
@@ -232,6 +235,13 @@ let full_flavor () : flavor = {
   has_poly_eq = true;
 }
 
+type actor_type = {
+  (* original actor type, including all actor fields *)
+  transient_actor_type: Type.typ;
+  (* record of stable actor fields used for persistence,
+     the fields are without mutability distinctions *)
+  stable_actor_type: Type.typ
+}
 
 (* Program *)
 
@@ -239,6 +249,7 @@ type comp_unit =
   | LibU of dec list * exp
   | ProgU of dec list
   | ActorU of arg list option * dec list * field list * system * Type.typ (* actor (class) *)
+     
 
 type prog = comp_unit * flavor
 
@@ -313,4 +324,3 @@ let map_prim t_typ t_id p =
   | ICStableWrite t -> ICStableWrite (t_typ t)
   | ICStableRead t -> ICStableRead (t_typ t)
   | ICStableSize t -> ICStableSize (t_typ t)
-
diff --git a/src/ir_def/rename.ml b/src/ir_def/rename.ml
index 4a4580877b1..f97594fba41 100644
--- a/src/ir_def/rename.ml
+++ b/src/ir_def/rename.ml
@@ -33,7 +33,7 @@ and exp' rho = function
   | VarE (m, i)         -> VarE (m, id rho i)
   | LitE _ as e         -> e
   | PrimE (p, es)       -> PrimE (prim rho p, List.map (exp rho) es)
-  | ActorE (ds, fs, { meta; preupgrade; postupgrade; heartbeat; timer; inspect }, t) ->
+  | ActorE (ds, fs, { meta; preupgrade; postupgrade; heartbeat; timer; inspect; stable_record; stable_type}, t) ->
     let ds', rho' = decs rho ds in
     ActorE
       (ds',
@@ -44,8 +44,11 @@ and exp' rho = function
         heartbeat = exp rho' heartbeat;
         timer = exp rho' timer;
         inspect = exp rho' inspect;
+        stable_type = stable_type;
+        stable_record = exp rho' stable_record;
        },
        t)
+
   | AssignE (e1, e2)    -> AssignE (lexp rho e1, exp rho e2)
   | BlockE (ds, e1)     -> let ds', rho' = decs rho ds
                            in BlockE (ds', exp rho' e1)
@@ -197,7 +200,7 @@ let comp_unit rho cu = match cu with
   | LibU (ds, e) ->
     let ds', rho' = decs rho ds
     in LibU (ds', exp rho' e)
-  | ActorU (as_opt, ds, fs, { meta; preupgrade; postupgrade; heartbeat; timer; inspect }, t) ->
+  | ActorU (as_opt, ds, fs, { meta; preupgrade; postupgrade; heartbeat; timer; inspect; stable_record; stable_type }, t) ->
     let as_opt', rho' = match as_opt with
       | None -> None, rho
       | Some as_ ->
@@ -212,4 +215,6 @@ let comp_unit rho cu = match cu with
         heartbeat = exp rho'' heartbeat;
         timer = exp rho'' timer;
         inspect = exp rho'' inspect;
+        stable_record = exp rho'' stable_record;
+        stable_type = stable_type;
       }, t)
diff --git a/src/ir_interpreter/interpret_ir.ml b/src/ir_interpreter/interpret_ir.ml
index cc74c355774..b3e1b79139b 100644
--- a/src/ir_interpreter/interpret_ir.ml
+++ b/src/ir_interpreter/interpret_ir.ml
@@ -462,8 +462,6 @@ and interpret_exp_mut env exp (k : V.value V.cont) =
           V.Env.empty tfs
         in
         k (V.Obj ve)
-      | ICStableWrite _, [v1] ->
-        k V.unit (* faking it *)
       | SelfRef _, [] ->
         k (V.Blob env.self)
       | SystemTimePrim, [] ->
diff --git a/src/ir_passes/async.ml b/src/ir_passes/async.ml
index d0a9db70c43..b3b52989e7a 100644
--- a/src/ir_passes/async.ml
+++ b/src/ir_passes/async.ml
@@ -442,15 +442,18 @@ let transform prog =
             | (Returns | Replies), _ -> assert false
           end
       end
-    | ActorE (ds, fs, {meta; preupgrade; postupgrade; heartbeat; timer; inspect}, typ) ->
+    | ActorE (ds, fs, {meta; preupgrade; postupgrade; heartbeat; timer; inspect; stable_record; stable_type}, typ) ->
       ActorE (t_decs ds, t_fields fs,
         {meta;
          preupgrade = t_exp preupgrade;
          postupgrade = t_exp postupgrade;
          heartbeat = t_exp heartbeat;
          timer = t_exp timer;
-         inspect = t_exp inspect
-        }, t_typ typ)
+         inspect = t_exp inspect;
+         stable_record = t_exp stable_record;
+         stable_type = t_typ stable_type;
+        },
+        t_typ typ)
     | NewObjE (sort, ids, t) ->
       NewObjE (sort, t_fields ids, t_typ t)
     | SelfCallE _ -> assert false
@@ -520,15 +523,18 @@ let transform prog =
   and t_comp_unit = function
     | LibU _ -> raise (Invalid_argument "cannot compile library")
     | ProgU ds -> ProgU (t_decs ds)
-    | ActorU (args_opt, ds, fs, {meta; preupgrade; postupgrade; heartbeat; timer; inspect}, t) ->
+    | ActorU (args_opt, ds, fs, {meta; preupgrade; postupgrade; heartbeat; timer; inspect; stable_record; stable_type}, t) ->
       ActorU (Option.map t_args args_opt, t_decs ds, t_fields fs,
         { meta;
           preupgrade = t_exp preupgrade;
           postupgrade = t_exp postupgrade;
           heartbeat = t_exp heartbeat;
           timer = t_exp timer;
-          inspect = t_exp inspect
-        }, t_typ t)
+          inspect = t_exp inspect;
+          stable_record = t_exp stable_record;
+          stable_type = t_typ stable_type;
+        },
+        t_typ t)
 
   and t_prog (cu, flavor) = (t_comp_unit cu, { flavor with has_async_typ = false } )
 in
diff --git a/src/ir_passes/await.ml b/src/ir_passes/await.ml
index 4072af11c75..84a2170a8d2 100644
--- a/src/ir_passes/await.ml
+++ b/src/ir_passes/await.ml
@@ -181,14 +181,16 @@ and t_exp' context exp =
     assert (not (T.is_shared_func (typ exp)));
     let context' = LabelEnv.singleton Return Label in
     FuncE (x, s, c, typbinds, pat, typs, t_exp context' exp1)
-  | ActorE (ds, ids, { meta; preupgrade; postupgrade; heartbeat; timer; inspect}, t) ->
+  | ActorE (ds, ids, { meta; preupgrade; postupgrade; heartbeat; timer; inspect; stable_record; stable_type}, t) ->
     ActorE (t_decs context ds, ids,
       { meta;
         preupgrade = t_exp LabelEnv.empty preupgrade;
         postupgrade = t_exp LabelEnv.empty postupgrade;
         heartbeat = t_ignore_throw LabelEnv.empty heartbeat;
         timer = t_ignore_throw LabelEnv.empty timer;
-        inspect = t_exp LabelEnv.empty inspect
+        inspect = t_exp LabelEnv.empty inspect;
+        stable_record = t_exp LabelEnv.empty stable_record;
+        stable_type;
       },
       t)
   | NewObjE (sort, ids, typ) -> exp.it
@@ -645,7 +647,7 @@ and t_comp_unit context = function
           expD (c_block context' ds (tupE []) (meta (T.unit) (fun v1 -> tupE [])))
         ]
     end
-  | ActorU (as_opt, ds, ids, { meta = m; preupgrade; postupgrade; heartbeat; timer; inspect}, t) ->
+  | ActorU (as_opt, ds, ids, { meta = m; preupgrade; postupgrade; heartbeat; timer; inspect; stable_record; stable_type}, t) ->
     ActorU (as_opt, t_decs context ds, ids,
       { meta = m;
         preupgrade = t_exp LabelEnv.empty preupgrade;
@@ -653,6 +655,8 @@ and t_comp_unit context = function
         heartbeat = t_ignore_throw LabelEnv.empty heartbeat;
         timer = t_ignore_throw LabelEnv.empty timer;
         inspect = t_exp LabelEnv.empty inspect;
+        stable_record = t_exp LabelEnv.empty stable_record;
+        stable_type;
       },
       t)
 
diff --git a/src/ir_passes/const.ml b/src/ir_passes/const.ml
index f258084d5ac..9bc4591cad1 100644
--- a/src/ir_passes/const.ml
+++ b/src/ir_passes/const.ml
@@ -164,7 +164,7 @@ let rec exp lvl (env : env) e : Lbool.t =
       surely_false
     | NewObjE _ -> (* mutable objects *)
       surely_false
-    | ActorE (ds, fs, {meta; preupgrade; postupgrade; heartbeat; timer; inspect}, _typ) ->
+    | ActorE (ds, fs, {meta; preupgrade; postupgrade; heartbeat; timer; inspect; stable_record; stable_type}, _typ) ->
       (* this may well be “the” top-level actor, so don’t update lvl here *)
       let (env', _) = decs lvl env ds in
       exp_ lvl env' preupgrade;
@@ -172,6 +172,7 @@ let rec exp lvl (env : env) e : Lbool.t =
       exp_ lvl env' heartbeat;
       exp_ lvl env' timer;
       exp_ lvl env' inspect;
+      exp_ lvl env' stable_record;
       surely_false
   in
   set_lazy_const e lb;
@@ -227,7 +228,7 @@ and block lvl env (ds, body) =
 and comp_unit = function
   | LibU _ -> raise (Invalid_argument "cannot compile library")
   | ProgU ds -> decs_ TopLvl M.empty ds
-  | ActorU (as_opt, ds, fs, {meta; preupgrade; postupgrade; heartbeat; timer; inspect}, typ) ->
+  | ActorU (as_opt, ds, fs, {meta; preupgrade; postupgrade; heartbeat; timer; inspect; stable_record; stable_type}, typ) ->
     let env = match as_opt with
       | None -> M.empty
       | Some as_ -> args TopLvl M.empty as_
@@ -237,7 +238,8 @@ and comp_unit = function
     exp_ TopLvl env' postupgrade;
     exp_ TopLvl env' heartbeat;
     exp_ TopLvl env' timer;
-    exp_ TopLvl env' inspect
+    exp_ TopLvl env' inspect;
+    exp_ TopLvl env' stable_record
 
 let analyze ((cu, _flavor) : prog) =
   ignore (comp_unit cu)
diff --git a/src/ir_passes/eq.ml b/src/ir_passes/eq.ml
index d0916e3ef5c..52486abcf4b 100644
--- a/src/ir_passes/eq.ml
+++ b/src/ir_passes/eq.ml
@@ -249,7 +249,7 @@ and t_exp' env = function
     NewObjE (sort, ids, t)
   | SelfCallE (ts, e1, e2, e3, e4) ->
     SelfCallE (ts, t_exp env e1, t_exp env e2, t_exp env e3, t_exp env e4)
-  | ActorE (ds, fields, {meta; preupgrade; postupgrade; heartbeat; timer; inspect}, typ) ->
+  | ActorE (ds, fields, {meta; preupgrade; postupgrade; heartbeat; timer; inspect; stable_record; stable_type}, typ) ->
     (* Until Actor expressions become their own units,
        we repeat what we do in `comp_unit` below *)
     let env1 = empty_env () in
@@ -259,6 +259,7 @@ and t_exp' env = function
     let heartbeat' = t_exp env1 heartbeat in
     let timer' = t_exp env1 timer in
     let inspect' = t_exp env1 inspect in
+    let stable_record' = t_exp env1 stable_record in
     let decls = eq_decls !(env1.params) in
     ActorE (decls @ ds', fields,
       {meta;
@@ -266,8 +267,12 @@ and t_exp' env = function
        postupgrade = postupgrade';
        heartbeat = heartbeat';
        timer = timer';
-       inspect = inspect'
-      }, typ)
+       inspect = inspect';
+       stable_record = stable_record';
+       stable_type;
+      },
+      typ
+      )
 
 and t_lexp env (e : Ir.lexp) = { e with it = t_lexp' env e.it }
 and t_lexp' env = function
@@ -296,7 +301,7 @@ and t_comp_unit = function
     let ds' = t_decs env ds in
     let decls = eq_decls !(env.params) in
     ProgU (decls @ ds')
-  | ActorU (as_opt, ds, fields, {meta; preupgrade; postupgrade; heartbeat; timer; inspect}, typ) ->
+  | ActorU (as_opt, ds, fields, {meta; preupgrade; postupgrade; heartbeat; timer; inspect; stable_record; stable_type}, typ) ->
     let env = empty_env () in
     let ds' = t_decs env ds in
     let preupgrade' = t_exp env preupgrade in
@@ -304,6 +309,7 @@ and t_comp_unit = function
     let heartbeat' = t_exp env heartbeat in
     let timer' = t_exp env timer in
     let inspect' = t_exp env inspect in
+    let stable_record' = t_exp env stable_record in
     let decls = eq_decls !(env.params) in
     ActorU (as_opt, decls @ ds', fields,
       {meta;
@@ -311,7 +317,9 @@ and t_comp_unit = function
        postupgrade = postupgrade';
        heartbeat = heartbeat';
        timer = timer';
-       inspect = inspect'
+       inspect = inspect';
+       stable_record = stable_record';
+       stable_type;
       }, typ)
 
 (* Entry point for the program transformation *)
diff --git a/src/ir_passes/erase_typ_field.ml b/src/ir_passes/erase_typ_field.ml
index 9ba4550e7c6..2b5daf75d70 100644
--- a/src/ir_passes/erase_typ_field.ml
+++ b/src/ir_passes/erase_typ_field.ml
@@ -126,7 +126,7 @@ let transform prog =
       DefineE (id, mut, t_exp exp1)
     | FuncE (x, s, c, typbinds, args, ret_tys, exp) ->
       FuncE (x, s, c, t_typ_binds typbinds, t_args args, List.map t_typ ret_tys, t_exp exp)
-    | ActorE (ds, fs, {meta; preupgrade; postupgrade; heartbeat; timer; inspect}, typ) ->
+    | ActorE (ds, fs, {meta; preupgrade; postupgrade; heartbeat; timer; inspect; stable_record; stable_type}, typ) ->
       ActorE (t_decs ds, t_fields fs,
        {meta;
         preupgrade = t_exp preupgrade;
@@ -134,7 +134,11 @@ let transform prog =
         heartbeat = t_exp heartbeat;
         timer = t_exp timer;
         inspect = t_exp inspect;
-       }, t_typ typ)
+        stable_record = t_exp stable_record;
+        stable_type = t_typ stable_type;
+       },
+       t_typ typ)
+
     | NewObjE (sort, ids, t) ->
       NewObjE (sort, t_fields ids, t_typ t)
     | SelfCallE _ -> assert false
@@ -207,7 +211,7 @@ let transform prog =
   and t_comp_unit = function
     | LibU _ -> raise (Invalid_argument "cannot compile library")
     | ProgU ds -> ProgU (t_decs ds)
-    | ActorU (args_opt, ds, fs, {meta; preupgrade; postupgrade; heartbeat; timer; inspect}, t) ->
+    | ActorU (args_opt, ds, fs, {meta; preupgrade; postupgrade; heartbeat; timer; inspect; stable_record; stable_type}, t) ->
       ActorU (Option.map t_args args_opt, t_decs ds, t_fields fs,
         { meta;
           preupgrade = t_exp preupgrade;
@@ -215,7 +219,10 @@ let transform prog =
           heartbeat = t_exp heartbeat;
           timer = t_exp timer;
           inspect = t_exp inspect;
-        }, t_typ t)
+          stable_record = t_exp stable_record;
+          stable_type = t_typ stable_type;
+        },
+        t_typ t)
   and t_prog (cu, flavor) = (t_comp_unit cu, { flavor with has_typ_field = false } )
 in
   t_prog prog
diff --git a/src/ir_passes/show.ml b/src/ir_passes/show.ml
index ef100707878..23f195220d7 100644
--- a/src/ir_passes/show.ml
+++ b/src/ir_passes/show.ml
@@ -291,7 +291,7 @@ and t_exp' env = function
     NewObjE (sort, ids, t)
   | SelfCallE (ts, e1, e2, e3, e4) ->
     SelfCallE (ts, t_exp env e1, t_exp env e2, t_exp env e3, t_exp env e4)
-  | ActorE (ds, fields, {meta; preupgrade; postupgrade; heartbeat; timer; inspect}, typ) ->
+  | ActorE (ds, fields, {meta; preupgrade; postupgrade; heartbeat; timer; inspect; stable_record; stable_type}, typ) ->
     (* Until Actor expressions become their own units,
        we repeat what we do in `comp_unit` below *)
     let env1 = empty_env () in
@@ -301,6 +301,7 @@ and t_exp' env = function
     let heartbeat' = t_exp env1 heartbeat in
     let timer' = t_exp env1 timer in
     let inspect' = t_exp env1 inspect in
+    let stable_record' = t_exp env1 stable_record in
     let decls = show_decls !(env1.params) in
     ActorE (decls @ ds', fields,
       { meta;
@@ -308,8 +309,11 @@ and t_exp' env = function
         postupgrade = postupgrade';
         heartbeat = heartbeat';
         timer = timer';
-        inspect = inspect'
-      }, typ)
+        inspect = inspect';
+        stable_record = stable_record';
+        stable_type;
+      },
+      typ)
 
 and t_lexp env (e : Ir.lexp) = { e with it = t_lexp' env e.it }
 and t_lexp' env = function
@@ -338,7 +342,7 @@ and t_comp_unit = function
     let ds' = t_decs env ds in
     let decls = show_decls !(env.params) in
     ProgU (decls @ ds')
-  | ActorU (as_opt, ds, fields, {meta; preupgrade; postupgrade; heartbeat; timer; inspect}, typ) ->
+  | ActorU (as_opt, ds, fields, {meta; preupgrade; postupgrade; heartbeat; timer; inspect; stable_record; stable_type}, typ) ->
     let env = empty_env () in
     let ds' = t_decs env ds in
     let preupgrade' = t_exp env preupgrade in
@@ -346,6 +350,7 @@ and t_comp_unit = function
     let heartbeat' = t_exp env heartbeat in
     let timer' = t_exp env timer in
     let inspect' = t_exp env inspect in
+    let stable_record' = t_exp env stable_record in
     let decls = show_decls !(env.params) in
     ActorU (as_opt, decls @ ds', fields,
       { meta;
@@ -354,6 +359,8 @@ and t_comp_unit = function
         heartbeat = heartbeat';
         timer = timer';
         inspect = inspect';
+        stable_record = stable_record';
+        stable_type
       }, typ)
 
 (* Entry point for the program transformation *)
diff --git a/src/ir_passes/tailcall.ml b/src/ir_passes/tailcall.ml
index 42bb99dddee..48fb2631f9a 100644
--- a/src/ir_passes/tailcall.ml
+++ b/src/ir_passes/tailcall.ml
@@ -128,7 +128,8 @@ and exp' env e  : exp' = match e.it with
     let exp4' = exp env exp4 in
     SelfCallE (ts, exp1', exp2', exp3', exp4')
   | ActorE (ds, fs, u, t) ->
-    let u = { u with preupgrade = exp env u.preupgrade; postupgrade = exp env u.postupgrade } in
+    (* TODO: tco other upgrade fields? *)
+    let u = { u with preupgrade = exp env u.preupgrade; postupgrade = exp env u.postupgrade; stable_record = exp env u.stable_record } in
     ActorE (snd (decs env ds), fs, u, t)
   | NewObjE (s,is,t)    -> NewObjE (s, is, t)
   | PrimE (p, es)       -> PrimE (p, List.map (exp env) es)
@@ -258,7 +259,12 @@ and comp_unit env = function
   | LibU _ -> raise (Invalid_argument "cannot compile library")
   | ProgU ds -> ProgU (snd (decs env ds))
   | ActorU (as_opt, ds, fs, u, t)  ->
-    let u = { u with preupgrade = exp env u.preupgrade; postupgrade = exp env u.postupgrade } in
+    (* TODO: tco other fields of u? *)
+    let u = { u with
+              preupgrade = exp env u.preupgrade;
+              postupgrade = exp env u.postupgrade;
+              stable_record = exp env u.stable_record;
+            } in
     ActorU (as_opt, snd (decs env ds), fs, u, t)
 
 and prog (cu, flavor) =
diff --git a/src/linking/linkModule.ml b/src/linking/linkModule.ml
index 403e9aa14a3..6adf1535312 100644
--- a/src/linking/linkModule.ml
+++ b/src/linking/linkModule.ml
@@ -6,6 +6,7 @@ plus the dylink section.
 open Wasm_exts.Ast
 open Wasm.Source
 open Wasm_exts.CustomModule
+module I64_convert = Wasm.I64_convert
 
 (*
 This module is a first stab that should be functionally working, but will go
@@ -91,6 +92,8 @@ and resolving.
 *)
 
 (* Linking *)
+exception LinkError of string
+exception TooLargeDataSegments of string
 
 type imports = (int32 * name) list
 
@@ -99,6 +102,18 @@ let phrase f x = { x with it = f x.it }
 let map_module f (em : extended_module) = { em with module_ = f em.module_ }
 let map_name_section f (em : extended_module) = { em with name = f em.name }
 
+(* Distinction between Memory64 and Memory32 *)
+
+let uses_memory64 (m: module_') : bool =
+  let open Wasm_exts.Types in
+  let MemoryType(_, index_type) = match m.memories with
+  | [] -> raise (LinkError "Expect at least one memory in module")
+  | memory::_ -> memory.it.mtype
+  in
+  match index_type with
+  | I64IndexType -> true
+  | I32IndexType -> false
+
 (* Generic functions about import and export lists *)
 
 let get_import is_thing j m =
@@ -188,7 +203,7 @@ let is_global_export = function
   | _ -> None
 
 
-let get_fun_typ i m : Wasm.Types.func_type =
+let get_fun_typ i m : Wasm_exts.Types.func_type =
   let imports_n = count_imports is_fun_import m in
   let tyvar =
     if i < imports_n
@@ -202,7 +217,7 @@ let get_fun_typ i m : Wasm.Types.func_type =
     in
   (Lib.List32.nth m.types tyvar).it
 
-let get_global_typ i m : Wasm.Types.global_type =
+let get_global_typ i m : Wasm_exts.Types.global_type =
   let imports_n = count_imports is_global_import m in
   if i < imports_n
   then
@@ -273,9 +288,6 @@ let remove_non_ic_exports (em : extended_module) : extended_module =
 
 (* Generic linking logic *)
 
-exception LinkError of string
-exception TooLargeDataSegments of string
-
 type renumbering = int32 -> int32
 
 let resolve imports exports : (int32 * int32) list =
@@ -405,22 +417,28 @@ let set_global global value = fun m ->
   let rec go i = function
     | [] -> assert false
     | g::gs when i = Int32.to_int global ->
-      let open Wasm.Types in
-      assert (g.it.gtype = GlobalType (I32Type, Immutable));
+      let open Wasm_exts.Types in
+      let global_value = if uses_memory64 m then
+        (assert (g.it.gtype = GlobalType (I64Type, Immutable));
+        Wasm_exts.Values.I64 (Int64.of_int32 value))
+      else
+        (assert (g.it.gtype = GlobalType (I32Type, Immutable));
+        Wasm_exts.Values.I32 value)
+      in
       let g = phrase (fun g' ->
-        { g' with value = [Const (Wasm.Values.I32 value @@ g.at) @@ g.at] @@ g.at }
+        { g' with value = [Const (global_value @@ g.at) @@ g.at] @@ g.at }
       ) g in
       g :: gs
     | g::gs -> g :: go (i+1) gs
   in
   { m with globals = go 0 m.globals }
 
-let fill_global (global : int32) (value : int32) : module_' -> module_' = fun m ->
+let fill_global (global : int32) (value : Wasm_exts.Values.value) (uses_memory64 : bool) : module_' -> module_' = fun m ->
   let rec instr' = function
     | Block (ty, is) -> Block (ty, instrs is)
     | Loop (ty, is) -> Loop (ty, instrs is)
     | If (ty, is1, is2) -> If (ty, instrs is1, instrs is2)
-    | GlobalGet v when v.it = global -> Const (Wasm.Values.I32 value @@ v.at)
+    | GlobalGet v when v.it = global -> Const (value @@ v.at)
     | GlobalSet v when v.it = global -> assert false
     | i -> i
   and instr i = phrase instr' i
@@ -432,11 +450,25 @@ let fill_global (global : int32) (value : int32) : module_' -> module_' = fun m
 
   let const = phrase instrs in
 
+  (* For 64-bit, convert the constant expression of the table segment offset to 32-bit. *)
+  let const_instr_to_32' = function
+    | Const { it = (Wasm_exts.Values.I64 number); at } -> Const ((Wasm_exts.Values.I32 (Int64.to_int32 number)) @@ at)
+    | GlobalGet v -> GlobalGet v
+    | _ -> assert false
+  in
+  let const_instr_to_32 i = phrase const_instr_to_32' i in
+  let convert_const_to_32' = List.map const_instr_to_32 in
+  let convert_const_to_32 = phrase convert_const_to_32' in
+  let table_const offset = 
+    let expr = const offset in
+    if uses_memory64 then convert_const_to_32 expr else expr
+  in
+
   let global' g = { g with value = const g.value } in
   let global = phrase global' in
   let globals = List.map global in
 
-  let table_segment' (s : var list segment') = { s with offset = const s.offset; } in
+  let table_segment' (s : var list segment') = { s with offset = table_const s.offset; } in
   let table_segment = phrase (table_segment') in
   let table_segments = List.map table_segment in
 
@@ -514,15 +546,19 @@ let rename_types rn m =
 let read_global gi (m : module_') : int32 =
   let n_impo = count_imports is_global_import m in
   let g = List.nth m.globals (Int32.(to_int (sub gi n_impo))) in
-  let open Wasm.Types in
-  assert (g.it.gtype = GlobalType (I32Type, Immutable));
-  match g.it.value.it with
-  | [{ it = Const {it = Wasm.Values.I32 i;_}; _}] -> i
+  let open Wasm_exts.Types in
+  match uses_memory64 m, g.it.value.it with
+  | true, [{ it = Const {it = Wasm_exts.Values.I64 i;_}; _}] -> 
+    assert (g.it.gtype = GlobalType (I64Type, Immutable));
+    Int64.to_int32 i
+  | false, [{ it = Const {it = Wasm_exts.Values.I32 i;_}; _}] ->
+    assert (g.it.gtype = GlobalType (I32Type, Immutable));
+    i
   | _ -> assert false
 
 let read_table_size (m : module_') : int32 =
   (* Assumes there is one table *)
-  let open Wasm.Types in
+  let open Wasm_exts.Types in
   match m.tables with
   | [t] ->
     let TableType ({min;max}, _) = t.it.ttype in
@@ -532,26 +568,27 @@ let read_table_size (m : module_') : int32 =
   | _ -> raise (LinkError "Expect one table in first module")
 
 let set_memory_size new_size_bytes : module_' -> module_' = fun m ->
-  let open Wasm.Types in
-  let page_size = Int32.of_int (64*1024) in
-  let new_size_pages = Int32.(add (div new_size_bytes page_size) 1l) in
+  let open Wasm_exts.Types in
+  let page_size = Int64.of_int (64*1024) in
+  let new_size_pages = Int64.(add (div new_size_bytes page_size) 1L) in
+  let index_type = if uses_memory64 m then I64IndexType else I32IndexType in
   match m.memories with
   | [t;t1] ->
     { m with
       memories = [(phrase (fun m ->
-        { mtype = MemoryType ({min = new_size_pages; max = None}) }
+        { mtype = MemoryType ({min = new_size_pages; max = None}, index_type) }
         ) t); t1]
     }
   | [t] ->
     { m with
       memories = [phrase (fun m ->
-        { mtype = MemoryType ({min = new_size_pages; max = None}) }
+        { mtype = MemoryType ({min = new_size_pages; max = None}, index_type) }
       ) t]
     }
   | _ -> raise (LinkError "Expect one memory in first module")
 
 let set_table_size new_size : module_' -> module_' = fun m ->
-  let open Wasm.Types in
+  let open Wasm_exts.Types in
   match m.tables with
   | [t] ->
     { m with
@@ -563,7 +600,7 @@ let set_table_size new_size : module_' -> module_' = fun m ->
   | _ -> raise (LinkError "Expect one table in first module")
 
 
-let fill_item_import module_name item_name new_base (m : module_') : module_' =
+let fill_item_import module_name item_name new_base uses_memory64 (m : module_') : module_' =
   (* We need to find the right import,
      replace all uses of get_global of that import with the constant,
      and finally rename all globals
@@ -582,7 +619,13 @@ let fill_item_import module_name item_name new_base (m : module_') : module_' =
           go i is
     in go 0 m.imports in
 
-    m |> fill_global base_global new_base
+    let new_base_value = if uses_memory64 then
+      Wasm_exts.Values.I64 (I64_convert.extend_i32_u new_base)
+    else
+      Wasm_exts.Values.I32 new_base
+    in
+
+    m |> fill_global base_global new_base_value uses_memory64
       |> remove_imports is_global_import [base_global, base_global]
       |> rename_globals Int32.(fun i ->
           if i < base_global then i
@@ -590,18 +633,21 @@ let fill_item_import module_name item_name new_base (m : module_') : module_' =
           else sub i one
         )
 
-let fill_memory_base_import new_base : module_' -> module_' =
-  fill_item_import "env" "__memory_base" new_base
-
-let fill_table_base_import new_base : module_' -> module_' =
-  fill_item_import "env" "__table_base" new_base
-
+let fill_memory_base_import new_base uses_memory64 : module_' -> module_' =
+  fill_item_import "env" "__memory_base" new_base uses_memory64
 
+let fill_table_base_import new_base uses_memory64 : module_' -> module_' = fun m ->
+  let m = fill_item_import "env" "__table_base" new_base uses_memory64 m in
+  if uses_memory64 then
+    fill_item_import "env" "__table_base32" new_base uses_memory64 m
+  else
+    m
+   
 (* Concatenation of modules *)
 
 let join_modules
       (em1 : extended_module) (m2 : module_') (ns2 : name_section)
-      (type_indices : (Wasm.Types.func_type, int32) Hashtbl.t) : extended_module =
+      (type_indices : (Wasm_exts.Types.func_type, int32) Hashtbl.t) : extended_module =
   let m1 = em1.module_ in
   let joined = 
     { em1 with
@@ -630,7 +676,7 @@ let join_modules
      we'll have the unit function in the type section already. *)
   match m2.start with
   | None -> joined
-  | Some fi -> prepend_to_start fi.it (Hashtbl.find type_indices (Wasm.Types.FuncType ([], []))) joined
+  | Some fi -> prepend_to_start fi.it (Hashtbl.find type_indices (Wasm_exts.Types.FuncType ([], []))) joined
 
 (* The main linking function *)
 
@@ -649,12 +695,12 @@ let check_typ is_thing get_typ string_of m1 m2 (i1, i2) =
     raise (LinkError msg)
 
 let check_fun_typ =
-  check_typ is_fun_import get_fun_typ Wasm.Types.string_of_func_type
+  check_typ is_fun_import get_fun_typ Wasm_exts.Types.string_of_func_type
 let check_global_typ =
-  check_typ is_global_import get_global_typ Wasm.Types.string_of_global_type
+  check_typ is_global_import get_global_typ Wasm_exts.Types.string_of_global_type
 
 
-let align p n =
+let align_i32 p n =
   let open Int32 in
   let p = to_int p in
   shift_left (shift_right_logical (add n (sub (shift_left 1l p) 1l)) p) p
@@ -691,10 +737,10 @@ let add_globals (globals0 : global list) (insert0 : (int32 * global') list) : gl
   go 0l globals0 insert0
 
 let mk_i32_const (i : int32) =
-  Const (Wasm.Values.I32 i @@ no_region) @@ no_region
+  Const (Wasm_exts.Values.I32 i @@ no_region) @@ no_region
 
 let mk_i32_global (i : int32) =
-  { gtype = Wasm.Types.GlobalType (Wasm.Types.I32Type, Wasm.Types.Immutable);
+  { gtype = Wasm_exts.Types.GlobalType (Wasm_exts.Types.I32Type, Wasm_exts.Types.Immutable);
     value = [mk_i32_const i] @@ no_region }
 
 (* Generate (global index, function index) pairs for GOT.func imports of a
@@ -784,27 +830,32 @@ let link (em1 : extended_module) libname (em2 : extended_module) =
 
   (* Beginning of unused space *)
   let old_heap_start = read_global heap_global em1.module_ in
-  let lib_heap_start = align dylink.memory_alignment old_heap_start in
-  let new_heap_start = align 4l (Int32.add lib_heap_start dylink.memory_size) in
-
-  (* The RTs data segments must fit below 4.5MB according to the persistent heap layout. 
-     The first 4MB are reserved for the Rust call stack such that RTS data segments are limited to 512KB. *)
-  let max_rts_stack_size = 4 * 1024 * 1024 in
-  let max_rts_data_segment_size = 512 * 1024 in
-  (if (Int32.to_int new_heap_start) > max_rts_stack_size + max_rts_data_segment_size then
-    (raise (TooLargeDataSegments (Printf.sprintf "The Wasm data segment size exceeds the supported maxmimum of %nMB." max_rts_data_segment_size)))
-  else
-    ()
-  );
+  let lib_heap_start = align_i32 dylink.memory_alignment old_heap_start in
+  let new_heap_start = align_i32 8l (Int32.add lib_heap_start dylink.memory_size) in
+
+  if uses_memory64 em1.module_ then
+  begin
+    (* The RTS data segments must fit below 4.5MB according to the persistent heap layout. 
+      The first 4MB are reserved for the Rust call stack such that RTS data segments are limited to 512KB. *)
+    let max_rts_stack_size = 4 * 1024 * 1024 in
+    let max_rts_data_segment_size = 512 * 1024 in
+    (if (Int32.to_int new_heap_start) > max_rts_stack_size + max_rts_data_segment_size then
+      (raise (TooLargeDataSegments (Printf.sprintf "The Wasm data segment size exceeds the supported maximum of %nMB." max_rts_data_segment_size)))
+    else
+      ()
+    )
+  end else ();
 
   let old_table_size = read_table_size em1.module_ in
-  let lib_table_start = align dylink.table_alignment old_table_size in
+  let lib_table_start = align_i32 dylink.table_alignment old_table_size in
+
+  let uses_memory64 = uses_memory64 em1.module_ in
 
   (* Fill in memory and table base pointers *)
   let dm2 = em2.module_
-    |> fill_memory_base_import lib_heap_start
-    |> fill_table_base_import lib_table_start in
-
+    |> fill_memory_base_import lib_heap_start uses_memory64
+    |> fill_table_base_import lib_table_start uses_memory64 in
+    
   let got_func_imports = collect_got_func_imports dm2 in
 
   (* Link functions *)
@@ -848,11 +899,11 @@ let link (em1 : extended_module) libname (em2 : extended_module) =
   (* Rename types in both modules to eliminate duplicate types. *)
 
   (* Maps function types to their indices in the new module we're creating *)
-  let type_indices : (Wasm.Types.func_type, int32) Hashtbl.t = Hashtbl.create 100 in
+  let type_indices : (Wasm_exts.Types.func_type, int32) Hashtbl.t = Hashtbl.create 100 in
 
   (* Get index of a function type. Creates a new one if we haven't added this
      type yet. *)
-  let add_or_get_ty (ty : Wasm.Types.func_type) =
+  let add_or_get_ty (ty : Wasm_exts.Types.func_type) =
     match Hashtbl.find_opt type_indices ty with
     | None ->
       let idx = Int32.of_int (Hashtbl.length type_indices) in
@@ -863,25 +914,28 @@ let link (em1 : extended_module) libname (em2 : extended_module) =
   in
 
   (* Rename a type in a module. First argument is the list of types in the module. *)
-  let ty_renamer (tys : Wasm.Types.func_type phrase list) (t : int32) : int32 =
+  let ty_renamer (tys : Wasm_exts.Types.func_type phrase list) (t : int32) : int32 =
     let fun_ty = List.nth tys (Int32.to_int t) in
     add_or_get_ty fun_ty.it
   in
 
-  (* Check that the first module generated by the compiler backend does not use 
-     active data segments. *)
   let is_active data_segment = match data_segment.it.dmode.it with
   | Active _ -> true
   | _ -> false
   in
   let em1_active_data_segments = List.filter is_active em1.module_.datas in
-  assert ((List.length em1_active_data_segments) = 0);
-  
   let is_passive data_segment = match data_segment.it.dmode.it with 
   | Passive -> true
   | _ -> false
   in
   let em1_passive_data_segments = List.filter is_passive em1.module_.datas in
+  
+  (* Check that the first module generated by the compiler backend does not use 
+     active data segments. *)
+  if uses_memory64 then
+    assert ((List.length em1_active_data_segments) = 0)
+  else ();
+  
   let dm2_data_segment_offset = List.length em1_passive_data_segments in
 
   (* Rename types in first module *)
@@ -906,7 +960,7 @@ let link (em1 : extended_module) libname (em2 : extended_module) =
   let add_call_ctors =
     match NameMap.find_opt (Lib.Utf8.decode "__wasm_call_ctors") fun_exports2 with
     | None -> fun em -> em
-    | Some fi -> prepend_to_start (funs2 fi) (add_or_get_ty (Wasm.Types.FuncType ([], [])))
+    | Some fi -> prepend_to_start (funs2 fi) (add_or_get_ty (Wasm_exts.Types.FuncType ([], [])))
   in
 
   let new_table_size =
@@ -914,7 +968,8 @@ let link (em1 : extended_module) libname (em2 : extended_module) =
   in
 
   (* Rust generates active data segments for the runtime system code that are not supported with orthogonal persistence.
-     Therefore, make the data segments passive and load them on initialization to their reserved static space. 
+     Therefore, for enhanced orthogonal persistence, make the data segments passive and load them on initialization to 
+     their reserved static space.
      Note: If Rust would also use passive data segments in future, the segment load indices need to be renumbered. *)
   let make_rts_data_segments_passive : module_' -> module_' = fun m ->
     let segment_mode' (dmode : segment_mode') = 
@@ -942,7 +997,7 @@ let link (em1 : extended_module) libname (em2 : extended_module) =
   
       let load_passive_segment index data_segment =
         let segment_index = Int32.of_int (Int.add index numbering_offset) in
-        let compile_const_i32 value = Const (Wasm.Values.I32 value @@ no_region) @@ no_region in
+        let compile_const_i32 value = Const (Wasm_exts.Values.I32 value @@ no_region) @@ no_region in
         let data_target = match data_segment.it.dmode.it with
           | Active { offset; _ } -> offset.it
           | _ -> raise (LinkError "Passive data segments are not yet supported in the RTS module")
@@ -979,11 +1034,11 @@ let link (em1 : extended_module) libname (em2 : extended_module) =
     |> rename_funcs_extended funs1
     |> rename_globals_extended globals1
     |> map_module (set_global heap_global new_heap_start)
-    |> map_module (set_memory_size new_heap_start)
+    |> map_module (set_memory_size (I64_convert.extend_i32_u new_heap_start))
     |> map_module (set_table_size new_table_size)
     )
     ( dm2
-    |> make_rts_data_segments_passive
+    |> (if uses_memory64 then make_rts_data_segments_passive else (fun m -> m))
     |> remove_imports is_fun_import fun_resolved21
     |> remove_imports is_global_import global_resolved21
     |> remove_imports is_memory_import [0l, 0l]
@@ -999,7 +1054,7 @@ let link (em1 : extended_module) libname (em2 : extended_module) =
     type_indices
   |> add_call_ctors
   |> remove_non_ic_exports (* only sane if no additional files get linked in *)
-  |> map_module (load_rts_data_segments dm2_data_segment_offset dm2.datas)
+  |> (if uses_memory64 then map_module (load_rts_data_segments dm2_data_segment_offset dm2.datas) else (fun m -> m))
   in
 
   (* Rename global and function indices in GOT.func stuff *)
diff --git a/src/lowering/desugar.ml b/src/lowering/desugar.ml
index 8e22d11fa89..9c498035d41 100644
--- a/src/lowering/desugar.ml
+++ b/src/lowering/desugar.ml
@@ -474,7 +474,12 @@ and export_runtime_information self_id =
   let bind2 = typ_arg scope_con2 Scope scope_bound in
   let gc_strategy = 
     let open Mo_config in
-    let strategy = "incremental" in
+    let strategy = match !Flags.gc_strategy with
+    | Flags.Default -> "default"
+    | Flags.MarkCompact -> "compacting"
+    | Flags.Copying -> "copying"
+    | Flags.Generational -> "generational"
+    | Flags.Incremental -> "incremental" in
     if !Flags.force_gc then (Printf.sprintf "%s force" strategy) else strategy
   in
   let prim_call function_name = primE (I.OtherPrim function_name) [] in
@@ -564,7 +569,7 @@ and build_actor at ts self_id es obj_typ =
     let vs = fresh_vars "v" (List.map (fun f -> f.T.typ) fields) in
     blockE
       ((match call_system_func_opt "preupgrade" es obj_typ with
-        | Some call -> [ expD (primE (I.ICPerformGC) []); expD call]
+        | Some call -> [ expD call]
         | None -> []) @
          [letP (seqP (List.map varP vs)) (* dereference any mutable vars, option 'em all *)
             (seqE (List.map (fun (i,t) -> optE (varE (var i t))) ids))])
@@ -580,7 +585,7 @@ and build_actor at ts self_id es obj_typ =
   let runtime_info_d, runtime_info_f = export_runtime_information self_id in
   I.(ActorE (footprint_d @ runtime_info_d @ ds', footprint_f @ runtime_info_f @ fs,
      { meta;
-       preupgrade = with_stable_vars (fun e -> primE (I.ICStableWrite ty) [e]);
+       preupgrade = (primE (I.ICStableWrite ty) []);
        postupgrade =
          (match call_system_func_opt "postupgrade" es obj_typ with
           | Some call -> call
@@ -600,11 +605,12 @@ and build_actor at ts self_id es obj_typ =
        inspect =
          (match call_system_func_opt "inspect" es obj_typ with
           | Some call -> call
-          | None -> tupE [])
+          | None -> tupE []);
+       stable_record = with_stable_vars (fun e -> e);
+       stable_type = ty;
      },
      obj_typ))
 
-
 and stabilize stab_opt d =
   let s = match stab_opt with None -> S.Flexible | Some s -> s.it  in
   match s, d.it with
@@ -1030,7 +1036,7 @@ let import_compiled_class (lib : S.comp_unit) wasm : import_declaration =
   let c', _ = T.as_con (List.hd cs') in
   let install_actor_helper = var "@install_actor_helper"
     T.(Func (Local, Returns, [scope_bind],
-      [install_arg_typ; blob; blob],
+      [install_arg_typ; bool; blob; blob],
       [Async(Cmp, Var (default_scope_var, 0), principal)]))
   in
   let wasm_blob = fresh_var "wasm_blob" T.blob in
@@ -1050,6 +1056,7 @@ let import_compiled_class (lib : S.comp_unit) wasm : import_declaration =
           (callE (varE install_actor_helper) cs'
             (tupE [
               install_arg;
+              boolE ((!Mo_config.Flags.enhanced_orthogonal_persistence)); 
               varE wasm_blob;
               primE (Ir.SerializePrim ts1') [seqE (List.map varE vs)]])))
         (primE (Ir.CastPrim (T.principal, t_actor)) [varE principal]))
@@ -1121,18 +1128,22 @@ let transform_unit_body (u : S.comp_unit_body) : Ir.comp_unit =
         T.promote rng
       | _ -> assert false
     in
+    let actor_expression = build_actor u.at ts (Some self_id) fields obj_typ in
     let e = wrap {
-       it = build_actor u.at ts (Some self_id) fields obj_typ;
+       it = actor_expression;
        at = no_region;
        note = Note.{ def with typ = obj_typ } }
     in
     begin match e.it with
-    | I.ActorE(ds, fs, u, t) -> I.ActorU (Some args, ds, fs, u, t)
+    | I.ActorE(ds, fs, u, t) ->
+      I.ActorU (Some args, ds, fs, u, t)
     | _ -> assert false
     end
   | S.ActorU (self_id, fields) ->
-    begin match build_actor u.at [] self_id fields u.note.S.note_typ with
-    | I.ActorE (ds, fs, u, t) -> I.ActorU (None, ds, fs, u, t)
+    let actor_expression = build_actor u.at [] self_id fields u.note.S.note_typ in
+    begin match actor_expression with
+    | I.ActorE (ds, fs, u, t) ->
+        I.ActorU (None, ds, fs, u, t)
     | _ -> assert false
     end
 
diff --git a/src/mo_config/flags.ml b/src/mo_config/flags.ml
index bc7011895c9..2b92d84b80e 100644
--- a/src/mo_config/flags.ml
+++ b/src/mo_config/flags.ml
@@ -4,6 +4,13 @@ module M = Map.Make(String)
 
 type compile_mode = WasmMode | ICMode | RefMode | WASIMode
 
+type gc_strategy = Default | MarkCompact | Copying | Generational | Incremental
+
+type instruction_limits = {
+  upgrade: int;
+  update_call: int;
+}
+
 let trace = ref false
 let verbose = ref false
 let print_warnings = ref true
@@ -38,13 +45,28 @@ let omit_metadata_names : string list ref = ref []
 let compiled = ref false
 let error_detail = ref 2
 let sanity = ref false
+let gc_strategy = ref Default
 let force_gc = ref false
 let global_timer = ref true
 let experimental_field_aliasing = ref false
 let ocaml_js = ref false
-let rtti = ref true
+let rts_stack_pages_default = 32 (* 2MB *)
+let rts_stack_pages : int option ref = ref None
+let rtti = ref false
 let trap_on_call_error = ref false
 let use_stable_regions = ref false
+let enhanced_orthogonal_persistence = ref false
 let share_code = ref false
+let stabilization_instruction_limit_default = {
+  upgrade = 180_000_000_000; (* 200 billion limit with 10% reserve *)
+  update_call = 18_000_000_000; (* 20 billion limit with 10% reserve *)
+}
+let stabilization_instruction_limit = ref stabilization_instruction_limit_default
+let stable_memory_access_limit_default = 
+  let gigabyte = 1024 * 1024 * 1024 in {
+  upgrade = 6 * gigabyte; (* 8 GB limit with 2 GB reserves *)
+  update_call = 1 * gigabyte; (* 2 GB limit with 1 GB reserve *)
+}
+let stable_memory_access_limit = ref stable_memory_access_limit_default
 let experimental_stable_memory_default = 0 (* _ < 0: error; _ = 0: warn, _ > 0: allow *)
 let experimental_stable_memory = ref experimental_stable_memory_default
diff --git a/src/pipeline/pipeline.ml b/src/pipeline/pipeline.ml
index 23e9a5c649a..d24ebee6b14 100644
--- a/src/pipeline/pipeline.ml
+++ b/src/pipeline/pipeline.ml
@@ -678,14 +678,47 @@ let ir_passes mode prog_ir name =
 (* Compilation *)
 
 let load_as_rts () =
-  let rts = match !Flags.sanity with
-    | false -> Rts.wasm_release
-    | true -> Rts.wasm_debug
+  let rts = match (!Flags.enhanced_orthogonal_persistence, !Flags.sanity, !Flags.gc_strategy) with
+    | (true, false, Flags.Incremental) -> Rts.wasm_eop_release
+    | (true, true, Flags.Incremental) -> Rts.wasm_eop_debug
+    | (false, false, Flags.Copying) 
+    | (false, false, Flags.MarkCompact)
+    | (false, false, Flags.Generational) -> Rts.wasm_non_incremental_release
+    | (false, true, Flags.Copying)
+    | (false, true, Flags.MarkCompact)
+    | (false, true, Flags.Generational) -> Rts.wasm_non_incremental_debug
+    | (false, false, Flags.Incremental) -> Rts.wasm_incremental_release
+    | (false, true, Flags.Incremental) -> Rts.wasm_incremental_debug
+    | _ -> assert false
   in
   Wasm_exts.CustomModuleDecode.decode "rts.wasm" (Lazy.force rts)
 
 type compile_result = (Idllib.Syntax.prog * Wasm_exts.CustomModule.extended_module) Diag.result
 
+let invalid_flag message =
+  builtin_error "compile" (Printf.sprintf "Invalid compiler flag combination: %s" message) []
+
+let adjust_flags () =
+  if !Flags.enhanced_orthogonal_persistence then
+    begin
+      (match !Flags.gc_strategy with
+      | Flags.Default | Flags.Incremental -> Flags.gc_strategy := Flags.Incremental;
+      | Flags.Copying -> invalid_flag "--copying-gc is not supported with --enhanced-orthogonal-persistence"
+      | Flags.MarkCompact -> invalid_flag "--compacting-gc is not supported with --enhanced-orthogonal-persistence"
+      | Flags.Generational -> invalid_flag "--generational-gc is not supported with --enhanced-orthogonal-persistence");
+      (if !Flags.rts_stack_pages <> None then invalid_flag "--rts-stack-pages is not supported with --enhanced-orthogonal-persistence");
+      Flags.rtti := true
+    end
+  else
+    begin
+      (if !Flags.gc_strategy = Flags.Default then Flags.gc_strategy := Flags.Copying);
+      (if !Flags.rts_stack_pages = None then Flags.rts_stack_pages := Some Flags.rts_stack_pages_default);
+      (if !Flags.stabilization_instruction_limit <> Flags.stabilization_instruction_limit_default then
+        invalid_flag "--stabilization-instruction-limit is only supported with --enhanced-orthogonal-persistence");
+      (if !Flags.stable_memory_access_limit <> Flags.stable_memory_access_limit_default then
+        invalid_flag "--stable-memory-access-limit is only supported with --enhanced-orthogonal-persistence")
+    end
+
 (* This transforms the flat list of libs (some of which are classes)
    into a list of imported libs and (compiled) classes *)
 let rec compile_libs mode libs : Lowering.Desugar.import_declaration =
@@ -707,8 +740,12 @@ and compile_unit mode do_link imports u : Wasm_exts.CustomModule.extended_module
   let prog_ir = desugar_unit imports u name in
   let prog_ir = ir_passes mode prog_ir name in
   phase "Compiling" name;
+  adjust_flags ();
   let rts = if do_link then Some (load_as_rts ()) else None in
-  Codegen.Compile.compile mode rts prog_ir
+  if !Flags.enhanced_orthogonal_persistence then
+    Codegen.Compile_enhanced.compile mode rts prog_ir
+  else
+    Codegen.Compile_classical.compile mode rts prog_ir
 
 and compile_unit_to_wasm mode imports (u : Syntax.comp_unit) : string =
   let wasm_mod = compile_unit mode true imports u in
diff --git a/src/prelude/internals.mo b/src/prelude/internals.mo
index b6116f27731..d5aff87983d 100644
--- a/src/prelude/internals.mo
+++ b/src/prelude/internals.mo
@@ -436,8 +436,10 @@ func @install_actor_helper(
       #upgrade : actor {} ;
       #upgrade_with_persistence : { wasm_memory_persistence: WasmMemoryPersistence; canister: actor {} };
     },
+    enhanced_orthogonal_persistence : Bool,
     wasm_module : Blob,
-    arg : Blob)
+    arg : Blob,
+    )
   : async* Principal = async* {
   let (mode, canister_id) =
     switch install_arg {
@@ -457,8 +459,13 @@ func @install_actor_helper(
         (#reinstall, (prim "principalOfActor" : (actor {}) -> Principal) actor1)
       };
       case (#upgrade actor2) {
+        let wasm_memory_persistence = if enhanced_orthogonal_persistence {
+          ?(#Keep)
+        } else {
+          null
+        };
         let upgradeOptions = {
-          wasm_memory_persistence = ?(#Keep);
+          wasm_memory_persistence;
         };
         ((#upgrade (?upgradeOptions)), (prim "principalOfActor" : (actor {}) -> Principal) actor2)
       };
@@ -668,5 +675,5 @@ func @cancelTimer(id : Nat) {
   }
 };
 
-func @set_global_timer(time : Nat64) = ignore (prim "global_timer_set" : Nat64 -> Nat64) time;
 
+func @set_global_timer(time : Nat64) = ignore (prim "global_timer_set" : Nat64 -> Nat64) time;
diff --git a/src/rts/gen.sh b/src/rts/gen.sh
index 513d2df5b52..34624d62f5f 100755
--- a/src/rts/gen.sh
+++ b/src/rts/gen.sh
@@ -11,5 +11,11 @@ if [ -z "$1" ]; then
   exit 1
 fi
 
-perl -0777 -ne 'print "let wasm_release = lazy \""; printf "\\x%02x", $_ for unpack("C*", $_); print "\"\n";' "$1/mo-rts.wasm" > "$file"
-perl -0777 -ne 'print "let wasm_debug = lazy \""; printf "\\x%02x", $_ for unpack("C*", $_); print "\"";' "$1/mo-rts-debug.wasm" >> "$file"
+perl -0777 -ne 'print "let wasm_non_incremental_release = lazy \""; printf "\\x%02x", $_ for unpack("C*", $_); print "\"\n";' "$1/mo-rts-non-incremental.wasm" > "$file"
+perl -0777 -ne 'print "let wasm_non_incremental_debug = lazy \""; printf "\\x%02x", $_ for unpack("C*", $_); print "\"";' "$1/mo-rts-non-incremental-debug.wasm" >> "$file"
+
+perl -0777 -ne 'print "let wasm_incremental_release = lazy \""; printf "\\x%02x", $_ for unpack("C*", $_); print "\"\n";' "$1/mo-rts-incremental.wasm" >> "$file"
+perl -0777 -ne 'print "let wasm_incremental_debug = lazy \""; printf "\\x%02x", $_ for unpack("C*", $_); print "\"";' "$1/mo-rts-incremental-debug.wasm" >> "$file"
+
+perl -0777 -ne 'print "let wasm_eop_release = lazy \""; printf "\\x%02x", $_ for unpack("C*", $_); print "\"\n";' "$1/mo-rts-eop.wasm" >> "$file"
+perl -0777 -ne 'print "let wasm_eop_debug = lazy \""; printf "\\x%02x", $_ for unpack("C*", $_); print "\"";' "$1/mo-rts-eop-debug.wasm" >> "$file"
diff --git a/src/rts/rts.ml b/src/rts/rts.ml
index 39fff73cb41..f62381a16aa 100644
--- a/src/rts/rts.ml
+++ b/src/rts/rts.ml
@@ -1,8 +1,8 @@
 (*
-This source file loads the RTS (`mo-rts.wasm` etc.) via the environment
+This source file loads the RTS Wasm files via the environment
 variables. This is for local development (e.g. inside `nix-shell`). The nix
 build of `moc` will statically replace this file with one that just embeds
-`mo-rts.wasm` etc. as a static string, to produce a fully self-contained `moc`
+RTS Wasm files as static strings, to produce a fully self-contained `moc`
 binary for distribution.
 *)
 
@@ -19,5 +19,11 @@ let load_file env =
     Printf.eprintf "Environment variable %s not set. Please run moc via the bin/moc wrapper (which should be in your PATH in the nix-shell)." env;
     exit 1
 
-let wasm_release : string Lazy.t = lazy (load_file "MOC_RELEASE_RTS")
-let wasm_debug : string Lazy.t = lazy (load_file "MOC_DEBUG_RTS")
+let wasm_non_incremental_release : string Lazy.t = lazy (load_file "MOC_NON_INCREMENTAL_RELEASE_RTS")
+let wasm_non_incremental_debug : string Lazy.t = lazy (load_file "MOC_NON_INCREMENTAL_DEBUG_RTS")
+
+let wasm_incremental_release : string Lazy.t = lazy (load_file "MOC_INCREMENTAL_RELEASE_RTS")
+let wasm_incremental_debug : string Lazy.t = lazy (load_file "MOC_INCREMENTAL_DEBUG_RTS")
+
+let wasm_eop_release : string Lazy.t = lazy (load_file "MOC_EOP_RELEASE_RTS")
+let wasm_eop_debug : string Lazy.t = lazy (load_file "MOC_EOP_DEBUG_RTS")
diff --git a/src/rts/rts.mli b/src/rts/rts.mli
index 27b30301429..5b9cdbacb96 100644
--- a/src/rts/rts.mli
+++ b/src/rts/rts.mli
@@ -1,2 +1,9 @@
-val wasm_release : string Lazy.t
-val wasm_debug : string Lazy.t
+val wasm_non_incremental_release : string Lazy.t
+val wasm_non_incremental_debug : string Lazy.t
+
+val wasm_incremental_release : string Lazy.t
+val wasm_incremental_debug : string Lazy.t
+
+(* eop = enhanced orthogonal persistence *)
+val wasm_eop_release : string Lazy.t
+val wasm_eop_debug : string Lazy.t
diff --git a/src/wasm-exts/ast.ml b/src/wasm-exts/ast.ml
index 60b4d161d3f..35b8d74537a 100644
--- a/src/wasm-exts/ast.ml
+++ b/src/wasm-exts/ast.ml
@@ -5,6 +5,7 @@ reference implementation.
 Base revision: WebAssembly/spec@a7a1856.
 
 The changes are:
+ * Manual selective support for bulk-memory operations `memory_copy` and `memory_fill` (WebAssembly/spec@7fa2f20).
  * Pseudo-instruction Meta for debug information
  * StableMemory, StableGrow, StableRead, StableWrite instructions.
  * Support for passive data segments (incl. `MemoryInit`).
@@ -31,9 +32,7 @@ easily apply diffs from the original code (possibly manually).
  * These conventions mostly follow standard practice in language semantics.
  *)
 
-open Wasm.Types
-module Values = Wasm.Values
-module Memory = Wasm.Memory
+open Types
 open Wasm.Source
 
 
@@ -112,7 +111,13 @@ and instr' =
   | Store of storeop                  (* write memory at address *)
   | MemorySize                        (* size of linear memory *)
   | MemoryGrow                        (* grow linear memory *)
+  (* Manual extension for bulk memory operations *)
+  | MemoryFill                        (* fill memory range with value *)
+  | MemoryCopy                        (* copy memory ranges *)
+  (* End of manual extension *)
+  (* Manual extension for passive data segments *)
   | MemoryInit of var                 (* initialize memory range from segment *)
+  (* End of manual extension *)
   | Const of literal                  (* constant *)
   | Test of testop                    (* numeric test *)
   | Compare of relop                  (* numeric comparison *)
diff --git a/src/wasm-exts/customModuleDecode.ml b/src/wasm-exts/customModuleDecode.ml
index 39db6267709..905c1b23a6a 100644
--- a/src/wasm-exts/customModuleDecode.ml
+++ b/src/wasm-exts/customModuleDecode.ml
@@ -1,9 +1,11 @@
 (*
-This module originated as a copy of interpreter/binary/encode.ml in the
+This module originated as a copy of interpreter/binary/decode.ml in the
 reference implementation.
+With adjustments from memory64.
 
 The changes are:
  * Support for additional custom sections
+ * Manual selective support for bulk-memory operations `memory_copy` and `memory_fill` (WebAssembly/spec@7fa2f20).
  * Support for passive data segments (incl. `MemoryInit`).
 
 The code is otherwise as untouched as possible, so that we can relatively
@@ -18,14 +20,13 @@ TODO:
 
 module Error = Wasm.Error
 module Source = Wasm.Source
-module I32 = Wasm.I32
-module I64 = Wasm.I64
 module F32 = Wasm.F32
 module F64 = Wasm.F64
 module I32_convert = Wasm.I32_convert
 module I64_convert = Wasm.I64_convert
 module Utf8 = Lib.Utf8
 open CustomModule
+open Types
 
 (* Decoding stream *)
 
@@ -125,8 +126,8 @@ let rec vsN n s =
   then (if b land 0x40 = 0 then x else Int64.(logor x (logxor (-1L) 0x7fL)))
   else Int64.(logor x (shift_left (vsN (n - 7) s) 7))
 
-let vu1 s = Int64.to_int (vuN 1 s)
 let vu32 s = Int64.to_int32 (vuN 32 s)
+let vu64 s = vuN 64 s
 let vs7 s = Int64.to_int (vsN 7 s)
 let vs32 s = Int64.to_int32 (vsN 32 s)
 let vs33 s = I32_convert.wrap_i64 (vsN 33 s)
@@ -140,7 +141,6 @@ let len32 s =
   if I32.le_u n (Int32.of_int (len s)) then Int32.to_int n else
     error s pos "length out of bounds"
 
-let bool s = (vu1 s = 1)
 let string s = let n = len32 s in get_string n s
 let rec list f n s = if n = 0 then [] else let x = f s in x :: list f (n - 1) s
 let opt f b s = if b then Some (f s) else None
@@ -161,7 +161,7 @@ let sized (f : int -> stream -> 'a) (s : stream) =
 
 (* Types *)
 
-open Wasm.Types
+open Types
 
 let value_type s =
   match vs7 s with
@@ -186,19 +186,23 @@ let func_type s =
   | _ -> error s (pos s - 1) "malformed function type"
 
 let limits vu s =
-  let has_max = bool s in
+  let flags = u8 s in
+  require (flags land 0xfa = 0) s (pos s - 1) "malformed limits flags";
+  let has_max = (flags land 1 = 1) in
+  let is64 = (flags land 4 = 4) in
   let min = vu s in
   let max = opt vu has_max s in
-  {min; max}
+  {min; max}, is64
 
 let table_type s =
   let t = elem_type s in
-  let lim = limits vu32 s in
+  let lim, is64 = limits vu32 s in
+  require (not is64) s (pos s - 1) "tables cannot have 64-bit indices";
   TableType (lim, t)
 
 let memory_type s =
-  let lim = limits vu32 s in
-  MemoryType lim
+  let lim, is64 = limits vu64 s in
+  MemoryType (lim, if is64 then I64IndexType else I32IndexType)
 
 let mutability s =
   match u8 s with
@@ -226,7 +230,7 @@ let zero s = expect 0x00 s "zero byte expected"
 let memop s =
   let align = vu32 s in
   require (I32.le_u align 32l) s (pos s - 1) "malformed memop flags";
-  let offset = vu32 s in
+  let offset = vu64 s in
   Int32.to_int align, offset
 
 let block_type s =
@@ -246,6 +250,10 @@ let math_prefix s =
   | 0x05 -> i64_trunc_sat_f32_u
   | 0x06 -> i64_trunc_sat_f64_s
   | 0x07 -> i64_trunc_sat_f64_u
+  (* Manual extension for specific bulk-memory operations *)
+  | 0x0a -> zero s; zero s; memory_copy
+  | 0x0b -> zero s; memory_fill
+  (* End of manual extension *)
   (* Manual extension for passive data segments *)
   | 0x08 ->
     let x = at var s in
diff --git a/src/wasm-exts/customModuleEncode.ml b/src/wasm-exts/customModuleEncode.ml
index bad8e6782c3..1c2625139f7 100644
--- a/src/wasm-exts/customModuleEncode.ml
+++ b/src/wasm-exts/customModuleEncode.ml
@@ -1,10 +1,12 @@
 (*
 This module originated as a copy of interpreter/binary/encode.ml in the
 reference implementation.
+With adjustments from memory64.
 
 The changes are:
  * Support for writing out a source map for the Code parts
  * Support for additional custom sections
+ * Manual selective support for bulk-memory operations `memory_copy` and `memory_fill` (WebAssembly/spec@7fa2f20).
  * Support for passive data segments (incl. `MemoryInit`).
 
 The code is otherwise as untouched as possible, so that we can relatively
@@ -300,7 +302,6 @@ let encode (em : extended_module) =
       if -64L <= i && i < 64L then u8 b
       else (u8 (b lor 0x80); vs64 (Int64.shift_right i 7))
 
-    let vu1 i = vu64 Int64.(logand (of_int i) 1L)
     let vu32 i = vu64 Int64.(logand (of_int32 i) 0xffffffffL)
     let vs7 i = vs64 (Int64.of_int i)
     let vs32 i = vs64 (Int64.of_int32 i)
@@ -308,13 +309,14 @@ let encode (em : extended_module) =
     let f32 x = u32 (Wasm.F32.to_bits x)
     let f64 x = u64 (Wasm.F64.to_bits x)
 
+    let flag b i = if b then 1 lsl i else 0
+
     let len i =
       if Int32.to_int (Int32.of_int i) <> i then
         Code.error Wasm.Source.no_region
           "cannot encode length with more than 32 bit";
       vu32 (Int32.of_int i)
 
-    let bool b = vu1 (if b then 1 else 0)
     let string bs = len (String.length bs); put_string s bs
     let name n = string (Lib.Utf8.encode n)
     let list f xs = List.iter f xs
@@ -334,7 +336,7 @@ let encode (em : extended_module) =
 
     (* Types *)
 
-    open Wasm.Types
+    open Types
 
     let value_type = function
       | I32Type -> vs7 (-0x01)
@@ -349,14 +351,15 @@ let encode (em : extended_module) =
     let func_type = function
       | FuncType (ins, out) -> vs7 (-0x20); stack_type ins; stack_type out
 
-    let limits vu {min; max} =
-      bool (max <> None); vu min; opt vu max
+    let limits vu {min; max} it =
+      let flags = flag (max <> None) 0 + flag (it = I64IndexType) 2 in
+      u8 flags; vu min; opt vu max
 
     let table_type = function
-      | TableType (lim, t) -> elem_type t; limits vu32 lim
+      | TableType (lim, t) -> elem_type t; limits vu32 lim I32IndexType
 
     let memory_type = function
-      | MemoryType lim -> limits vu32 lim
+      | MemoryType (lim, it) -> limits vu64 lim it
 
     let mutability = function
       | Immutable -> u8 0
@@ -369,12 +372,12 @@ let encode (em : extended_module) =
 
     open Wasm.Source
     open Ast
-    open Wasm.Values
+    open Values
 
     let op n = u8 n
     let end_ () = op 0x0b
 
-    let memop {align; offset; _} = vu32 (Int32.of_int align); vu32 offset
+    let memop {align; offset; _} = vu32 (Int32.of_int align); vu64 offset
 
     let var x = vu32 x.it
 
@@ -472,6 +475,10 @@ let encode (em : extended_module) =
       | MemorySize -> op 0x3f; u8 0x00
       | MemoryGrow -> op 0x40; u8 0x00
 
+      (* Manual extension for bulk-memory operations *)
+      | MemoryFill -> op 0xfc; vu32 0x0bl; u8 0x00
+      | MemoryCopy -> op 0xfc; vu32 0x0al; u8 0x00; u8 0x00
+      (* End of manual extension *)
       (* Manual extension for passive data segments *)
       | MemoryInit x -> op 0xfc; vu32 0x08l; var x; u8 0x00
       (* End of manual extension *)
diff --git a/src/wasm-exts/memory.ml b/src/wasm-exts/memory.ml
new file mode 100644
index 00000000000..6f45d90293b
--- /dev/null
+++ b/src/wasm-exts/memory.ml
@@ -0,0 +1,155 @@
+(*
+This module originated as a copy of interpreter/runtime/memory.ml in the
+reference implementation.
+With adjustments from memory64.
+*)
+
+open Bigarray
+open Lib.Bigarray
+open Types
+open Values
+module I64_convert = Wasm.I64_convert
+
+type size = int64  (* number of pages *)
+type address = int64
+type offset = int64
+
+type memory' = (int, int8_unsigned_elt, c_layout) Array1.t
+type memory = {mutable content : memory'; max : size option; it : index_type}
+type t = memory
+
+exception Type
+exception Bounds
+exception SizeOverflow
+exception SizeLimit
+exception OutOfMemory
+
+let page_size = 0x10000L (* 64 KiB *)
+
+let within_limits n = function
+  | None -> true
+  | Some max -> I64.le_u n max
+
+let create n it =
+  if I64.gt_u n 0x10000L && it = I32IndexType then raise SizeOverflow else
+  try
+    let size = Int64.(mul n page_size) in
+    let mem = Array1_64.create Int8_unsigned C_layout size in
+    Array1.fill mem 0;
+    mem
+  with Out_of_memory -> raise OutOfMemory
+
+let alloc (MemoryType ({min; max}, it)) =
+  assert (within_limits min max);
+  {content = create min it; max; it}
+
+let bound mem =
+  Array1_64.dim mem.content
+
+let size mem =
+  Int64.(div (bound mem) page_size)
+
+let type_of mem =
+  MemoryType ({min = size mem; max = mem.max}, mem.it)
+
+let index_of mem = mem.it
+
+let value_of_address it x =
+  if it = I64IndexType then I64 (x) else I32 (Int64.to_int32 x)
+
+let address_of_value x =
+  match x with
+  | I64 i -> i
+  | I32 i -> I64_convert.extend_i32_u i
+  | _ -> raise Type
+
+let grow mem delta =
+  let old_size = size mem in
+  let new_size = Int64.add old_size delta in
+  if I64.gt_u old_size new_size then raise SizeOverflow else
+  if not (within_limits new_size mem.max) then raise SizeLimit else
+  let after = create new_size mem.it in
+  let dim = Array1_64.dim mem.content in
+  Array1.blit (Array1_64.sub mem.content 0L dim) (Array1_64.sub after 0L dim);
+  mem.content <- after
+
+let load_byte mem a =
+  try Array1_64.get mem.content a with Invalid_argument _ -> raise Bounds
+
+let store_byte mem a b =
+  try Array1_64.set mem.content a b with Invalid_argument _ -> raise Bounds
+
+let load_bytes mem a n =
+  let buf = Buffer.create n in
+  for i = 0 to n - 1 do
+    Buffer.add_char buf (Char.chr (load_byte mem Int64.(add a (of_int i))))
+  done;
+  Buffer.contents buf
+
+let store_bytes mem a bs =
+  for i = String.length bs - 1 downto 0 do
+    store_byte mem Int64.(add a (of_int i)) (Char.code bs.[i])
+  done
+
+let effective_address a o =
+  let ea = Int64.(add a o) in
+  if I64.lt_u ea a then raise Bounds;
+  ea
+
+let loadn mem a o n =
+  assert (n > 0 && n <= 8);
+  let rec loop a n =
+    if n = 0 then 0L else begin
+      let x = Int64.(shift_left (loop (add a 1L) (n - 1)) 8) in
+      Int64.logor (Int64.of_int (load_byte mem a)) x
+    end
+  in loop (effective_address a o) n
+
+let storen mem a o n x =
+  assert (n > 0 && n <= 8);
+  let rec loop a n x =
+    if n > 0 then begin
+      Int64.(loop (effective_address a 1L) (n - 1) (shift_right x 8));
+      store_byte mem a (Int64.to_int x land 0xff)
+    end
+  in loop (effective_address a o) n x
+
+let load_value mem a o t =
+  let n = loadn mem a o (Types.size t) in
+  match t with
+  | I32Type -> I32 (Int64.to_int32 n)
+  | I64Type -> I64 n
+  | F32Type -> F32 (F32.of_bits (Int64.to_int32 n))
+  | F64Type -> F64 (F64.of_bits n)
+
+let store_value mem a o v =
+  let x =
+    match v with
+    | I32 x -> Int64.of_int32 x
+    | I64 x -> x
+    | F32 x -> Int64.of_int32 (F32.to_bits x)
+    | F64 x -> F64.to_bits x
+  in storen mem a o (Types.size (Values.type_of v)) x
+
+let extend x n = function
+  | ZX -> x
+  | SX -> let sh = 64 - 8 * n in Int64.(shift_right (shift_left x sh) sh)
+
+let load_packed sz ext mem a o t =
+  assert (packed_size sz <= Types.size t);
+  let n = packed_size sz in
+  let x = extend (loadn mem a o n) n ext in
+  match t with
+  | I32Type -> I32 (Int64.to_int32 x)
+  | I64Type -> I64 x
+  | _ -> raise Type
+
+let store_packed sz mem a o v =
+  assert (packed_size sz <= Types.size (Values.type_of v));
+  let n = packed_size sz in
+  let x =
+    match v with
+    | I32 x -> Int64.of_int32 x
+    | I64 x -> x
+    | _ -> raise Type
+  in storen mem a o n x
diff --git a/src/wasm-exts/memory.mli b/src/wasm-exts/memory.mli
new file mode 100644
index 00000000000..01da46c7c22
--- /dev/null
+++ b/src/wasm-exts/memory.mli
@@ -0,0 +1,49 @@
+(*
+This module originated as a copy of interpreter/runtime/memory.mli in the
+reference implementation.
+With adjustments from memory64.
+*)
+
+open Types
+open Values
+
+type memory
+type t = memory
+
+type size = int64  (* number of pages *)
+type address = int64
+type offset = int64
+
+exception Type
+exception Bounds
+exception SizeOverflow
+exception SizeLimit
+exception OutOfMemory
+
+val page_size : int64
+
+val alloc : memory_type -> memory (* raises SizeOverflow, OutOfMemory *)
+val type_of : memory -> memory_type
+val index_of : memory -> index_type
+val size : memory -> size
+val bound : memory -> address
+val value_of_address : index_type -> address -> value
+val address_of_value : value -> address
+val grow : memory -> size -> unit
+  (* raises SizeLimit, SizeOverflow, OutOfMemory *)
+
+val load_byte : memory -> address -> int (* raises Bounds *)
+val store_byte : memory -> address -> int -> unit (* raises Bounds *)
+val load_bytes : memory -> address -> int -> string (* raises Bounds *)
+val store_bytes : memory -> address -> string -> unit (* raises Bounds *)
+
+val load_value :
+  memory -> address -> offset -> value_type -> value (* raises Bounds *)
+val store_value :
+  memory -> address -> offset -> value -> unit (* raises Bounds *)
+val load_packed :
+  pack_size -> extension -> memory -> address -> offset -> value_type -> value
+    (* raises Type, Bounds *)
+val store_packed :
+  pack_size -> memory -> address -> offset -> value -> unit
+    (* raises Type, Bounds *)
diff --git a/src/wasm-exts/operators.ml b/src/wasm-exts/operators.ml
index 7e000e1ae91..2fda88ebd76 100644
--- a/src/wasm-exts/operators.ml
+++ b/src/wasm-exts/operators.ml
@@ -7,6 +7,7 @@ that it got basically replicated into the customModuleDecode.ml file.
 Base revision: WebAssembly/spec@a7a1856.
 
 The changes are:
+  * Manual selective support for bulk-memory operations `memory_copy` and `memory_fill` (WebAssembly/spec@7fa2f20).
   * Support for passive data segments (incl. `MemoryInit`).
 
 The code is otherwise as untouched as possible, so that we can relatively
@@ -14,8 +15,8 @@ easily apply diffs from the original code (possibly manually).
 *)
 
 open Wasm.Source
-open Wasm.Types
-open Wasm.Values
+open Types
+open Values
 open Ast
 
 
@@ -230,6 +231,10 @@ let f64_reinterpret_i64 = Convert (F64 F64Op.ReinterpretInt)
 let memory_size = MemorySize
 let memory_grow = MemoryGrow
 
+(* Manual extension for specific bulk-memory operations *)
+let memory_fill = MemoryFill
+let memory_copy = MemoryCopy
+(* End of manual extension *)
 (* Manual extension for passive data segments *)
 let memory_init x = MemoryInit x
 (* End of manual extension *)
diff --git a/src/wasm-exts/types.ml b/src/wasm-exts/types.ml
new file mode 100644
index 00000000000..6e7ebcd37f8
--- /dev/null
+++ b/src/wasm-exts/types.ml
@@ -0,0 +1,140 @@
+(*
+This module originated as a copy of interpreter/syntax/types.ml in the
+reference implementation.
+With adjustments from memory64.
+*)
+
+module I32 = Wasm.I32
+module I64 = Wasm.I64
+
+let rec map_filter f = function
+| [] -> []
+| x::xs ->
+  match f x with
+  | None -> map_filter f xs
+  | Some y -> y :: map_filter f xs
+
+(* Types *)
+
+type value_type = I32Type | I64Type | F32Type | F64Type
+type index_type = I32IndexType | I64IndexType
+type elem_type = FuncRefType
+type stack_type = value_type list
+type func_type = FuncType of stack_type * stack_type
+
+type 'a limits = {min : 'a; max : 'a option}
+type mutability = Immutable | Mutable
+type table_type = TableType of Int32.t limits * elem_type
+type memory_type = MemoryType of Int64.t limits * index_type
+type global_type = GlobalType of value_type * mutability
+type extern_type =
+  | ExternFuncType of func_type
+  | ExternTableType of table_type
+  | ExternMemoryType of memory_type
+  | ExternGlobalType of global_type
+
+type pack_size = Pack8 | Pack16 | Pack32
+type extension = SX | ZX
+
+
+(* Attributes *)
+
+let size = function
+  | I32Type | F32Type -> 4
+  | I64Type | F64Type -> 8
+
+let packed_size = function
+  | Pack8 -> 1
+  | Pack16 -> 2
+  | Pack32 -> 4
+
+let value_type_of_index_type = function
+  | I32IndexType -> I32Type
+  | I64IndexType -> I64Type
+
+
+(* Subtyping *)
+
+let match_limits ge lim1 lim2 =
+  ge lim1.min lim2.min &&
+  match lim1.max, lim2.max with
+  | _, None -> true
+  | None, Some _ -> false
+  | Some i, Some j -> ge j i
+
+let match_func_type ft1 ft2 =
+  ft1 = ft2
+
+let match_table_type (TableType (lim1, et1)) (TableType (lim2, et2)) =
+  et1 = et2 && match_limits I32.ge_u lim1 lim2
+
+let match_memory_type (MemoryType (lim1, it1)) (MemoryType (lim2, it2)) =
+  it1 = it2 && match_limits I64.ge_u lim1 lim2
+
+let match_global_type gt1 gt2 =
+  gt1 = gt2
+
+let match_extern_type et1 et2 =
+  match et1, et2 with
+  | ExternFuncType ft1, ExternFuncType ft2 -> match_func_type ft1 ft2
+  | ExternTableType tt1, ExternTableType tt2 -> match_table_type tt1 tt2
+  | ExternMemoryType mt1, ExternMemoryType mt2 -> match_memory_type mt1 mt2
+  | ExternGlobalType gt1, ExternGlobalType gt2 -> match_global_type gt1 gt2
+  | _, _ -> false
+
+
+(* Filters *)
+
+let funcs =
+  map_filter (function ExternFuncType t -> Some t | _ -> None)
+let tables =
+  map_filter (function ExternTableType t -> Some t | _ -> None)
+let memories =
+  map_filter (function ExternMemoryType t -> Some t | _ -> None)
+let globals =
+  map_filter (function ExternGlobalType t -> Some t | _ -> None)
+
+(* String conversion *)
+
+let string_of_value_type = function
+  | I32Type -> "i32"
+  | I64Type -> "i64"
+  | F32Type -> "f32"
+  | F64Type -> "f64"
+
+let string_of_value_types = function
+  | [t] -> string_of_value_type t
+  | ts -> "[" ^ String.concat " " (List.map string_of_value_type ts) ^ "]"
+
+let string_of_elem_type = function
+  | FuncRefType -> "funcref"
+
+let string_of_limits to_string {min; max} =
+  to_string min ^
+  (match max with None -> "" | Some n -> " " ^ to_string n)
+
+let string_of_memory_type = function
+  | MemoryType (lim, it) ->
+    string_of_value_type (value_type_of_index_type it) ^
+    " " ^ string_of_limits I64.to_string_u lim
+
+
+let string_of_table_type = function
+  | TableType (lim, t) -> string_of_limits I32.to_string_u lim ^ " " ^
+                          string_of_elem_type t
+
+let string_of_global_type = function
+  | GlobalType (t, Immutable) -> string_of_value_type t
+  | GlobalType (t, Mutable) -> "(mut " ^ string_of_value_type t ^ ")"
+
+let string_of_stack_type ts =
+  "[" ^ String.concat " " (List.map string_of_value_type ts) ^ "]"
+
+let string_of_func_type (FuncType (ins, out)) =
+  string_of_stack_type ins ^ " -> " ^ string_of_stack_type out
+
+let string_of_extern_type = function
+  | ExternFuncType ft -> "func " ^ string_of_func_type ft
+  | ExternTableType tt -> "table " ^ string_of_table_type tt
+  | ExternMemoryType mt -> "memory " ^ string_of_memory_type mt
+  | ExternGlobalType gt -> "global " ^ string_of_global_type gt
diff --git a/src/wasm-exts/values.ml b/src/wasm-exts/values.ml
new file mode 100644
index 00000000000..fc14357b019
--- /dev/null
+++ b/src/wasm-exts/values.ml
@@ -0,0 +1,86 @@
+(*
+This module originated as a copy of interpreter/syntax/values.ml in the
+reference implementation.
+No adjustments.
+*)
+
+open Types
+module F32 = Wasm.F32
+module F64 = Wasm.F64
+
+(* Values and operators *)
+
+type ('i32, 'i64, 'f32, 'f64) op =
+  I32 of 'i32 | I64 of 'i64 | F32 of 'f32 | F64 of 'f64
+
+type value = (I32.t, I64.t, F32.t, F64.t) op
+
+
+(* Typing *)
+
+let type_of = function
+  | I32 _ -> I32Type
+  | I64 _ -> I64Type
+  | F32 _ -> F32Type
+  | F64 _ -> F64Type
+
+let default_value = function
+  | I32Type -> I32 I32.zero
+  | I64Type -> I64 I64.zero
+  | F32Type -> F32 F32.zero
+  | F64Type -> F64 F64.zero
+
+
+(* Conversion *)
+
+let value_of_bool b = I32 (if b then 1l else 0l)
+
+let string_of_value = function
+  | I32 i -> I32.to_string_s i
+  | I64 i -> I64.to_string_s i
+  | F32 z -> F32.to_string z
+  | F64 z -> F64.to_string z
+
+let string_of_values = function
+  | [v] -> string_of_value v
+  | vs -> "[" ^ String.concat " " (List.map string_of_value vs) ^ "]"
+
+
+(* Injection & projection *)
+
+exception Value of value_type
+
+module type ValueType =
+sig
+  type t
+  val to_value : t -> value
+  val of_value : value -> t (* raise Value *)
+end
+
+module I32Value =
+struct
+  type t = I32.t
+  let to_value i = I32 i
+  let of_value = function I32 i -> i | _ -> raise (Value I32Type)
+end
+
+module I64Value =
+struct
+  type t = I64.t
+  let to_value i = I64 i
+  let of_value = function I64 i -> i | _ -> raise (Value I64Type)
+end
+
+module F32Value =
+struct
+  type t = F32.t
+  let to_value i = F32 i
+  let of_value = function F32 z -> z | _ -> raise (Value F32Type)
+end
+
+module F64Value =
+struct
+  type t = F64.t
+  let to_value i = F64 i
+  let of_value = function F64 z -> z | _ -> raise (Value F64Type)
+end
diff --git a/test/Makefile b/test/Makefile
index d5a94aa2003..df6238aa4b5 100644
--- a/test/Makefile
+++ b/test/Makefile
@@ -1,4 +1,5 @@
-QUICK_DIRS = cmp fail idl ld mo-idl repl run
+# TODO: Include ld when it supports Wasm64
+QUICK_DIRS = cmp fail idl mo-idl repl run
 OTHER_DIRS = bench perf run-deser run-drun trap
 TEST_DIRS = $(QUICK_DIRS) $(OTHER_DIRS)
 SOURCE_PATHS ?= ../src # will be overridden when building coverage report with nix
diff --git a/test/bench/heap-64.mo b/test/bench/heap-64.mo
new file mode 100644
index 00000000000..32fd57e259d
--- /dev/null
+++ b/test/bench/heap-64.mo
@@ -0,0 +1,55 @@
+// allocate 2 big arrays with a 10^5 entries each, and populate them
+// with Int64s and Nat64s (somewhat randomly)
+import { Array_tabulate; performanceCounter; intToNat64Wrap; intToInt64Wrap; rts_heap_size; debugPrint } = "mo:⛔";
+
+actor Tagged {
+
+    func arrNat64(seed : Nat) : ([Nat64], Nat) {
+        var state = seed;
+        let cutoff = 9223372036854775805;
+        var taggable = 0;
+        (Array_tabulate(
+            100_000,
+            func _ {
+                state := (state + 17) * 57 % cutoff;
+                let wrapped = intToNat64Wrap state;
+                let bits = (wrapped >> 60) & 3;
+                if (bits == 0 or bits == 3)
+                   taggable += 1;
+                   wrapped
+            })
+         , taggable)
+    };
+
+    func arrInt64(seed : Nat) : ([Int64], Nat) {
+        var state = seed;
+        let cutoff = 18446744073709551631;
+        var taggable = 0;
+        (Array_tabulate(
+            100_000,
+            func _ {
+                state := (state + 17) * 57 % cutoff;
+                let wrapped = intToInt64Wrap state;
+                let bits = (wrapped >> 60) & 3;
+                if (bits == 0 or bits == 3)
+                   taggable += 1;
+                   wrapped
+            }), taggable)
+    };
+
+    func counters() : (Int, Nat64) = (rts_heap_size(), performanceCounter(0));
+
+    public func go() : async () {
+        let (m0, n0) = counters();
+        let (_, nt) = arrNat64(7);
+        let (m1, n1) = counters();
+        debugPrint(debug_show (nt, m1 - m0, n1 - n0));
+
+        let (i0, j0) = counters();
+        let (_, it) = arrInt64(13);
+        let (i1, j1) = counters();
+        debugPrint(debug_show (it, i1 - i0, j1 - j0))
+    }
+}
+
+//CALL ingress go 0x4449444C0000
diff --git a/test/bench/ok/alloc.drun-run.ok b/test/bench/ok/alloc.drun-run.ok
index e8bb0b9613a..027d9bd722a 100644
--- a/test/bench/ok/alloc.drun-run.ok
+++ b/test/bench/ok/alloc.drun-run.ok
@@ -1,8 +1,8 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
-debug.print: (+335_544_320, 4_613_740_069)
+debug.print: (+268_435_456, 3_071_963_514)
 ingress Completed: Reply: 0x4449444c0000
-debug.print: (+335_544_320, 4_613_737_156)
+debug.print: (+268_435_456, 3_070_488_954)
 ingress Completed: Reply: 0x4449444c0000
-debug.print: (+335_544_320, 4_613_737_171)
+debug.print: (+268_435_456, 3_070_488_954)
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/bench/ok/bignum.drun-run.ok b/test/bench/ok/bignum.drun-run.ok
index d7497c87ad5..55f31d9ab4e 100644
--- a/test/bench/ok/bignum.drun-run.ok
+++ b/test/bench/ok/bignum.drun-run.ok
@@ -1,6 +1,6 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
-debug.print: {cycles = 2_626_820; size = +60_128}
+debug.print: {cycles = 2_619_967; size = +59_652}
 ingress Completed: Reply: 0x4449444c0000
-debug.print: {cycles = 107_960_874; size = +1_826_952}
+debug.print: {cycles = 107_892_482; size = +1_817_872}
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/bench/ok/candid-subtype-cost.drun-run.ok b/test/bench/ok/candid-subtype-cost.drun-run.ok
index d67cf3fef1f..9daf0b8e774 100644
--- a/test/bench/ok/candid-subtype-cost.drun-run.ok
+++ b/test/bench/ok/candid-subtype-cost.drun-run.ok
@@ -1,4 +1,4 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
-debug.print: {cycles = 1_027_001; heap_bytes = +16_936}
+debug.print: {cycles = 982_245; heap_bytes = +12_564}
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/bench/ok/heap-32.drun-run.ok b/test/bench/ok/heap-32.drun-run.ok
index 305210a061e..2ad4c135852 100644
--- a/test/bench/ok/heap-32.drun-run.ok
+++ b/test/bench/ok/heap-32.drun-run.ok
@@ -1,5 +1,5 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
-debug.print: (50_227, +37_379_412, 870_764_655)
-debug.print: (50_070, +38_903_788, 905_085_904)
+debug.print: (50_227, +29_863_068, 769_836_717)
+debug.print: (50_070, +32_992_212, 831_339_788)
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/bench/ok/heap-64.drun-run.ok b/test/bench/ok/heap-64.drun-run.ok
new file mode 100644
index 00000000000..d2f53614912
--- /dev/null
+++ b/test/bench/ok/heap-64.drun-run.ok
@@ -0,0 +1,5 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+debug.print: (49_965, +47_942_744, 1_027_584_937)
+debug.print: (49_806, +47_960_000, 1_036_639_548)
+ingress Completed: Reply: 0x4449444c0000
diff --git a/test/bench/ok/nat16.drun-run.ok b/test/bench/ok/nat16.drun-run.ok
index 9bb141f2499..8febef52370 100644
--- a/test/bench/ok/nat16.drun-run.ok
+++ b/test/bench/ok/nat16.drun-run.ok
@@ -1,4 +1,4 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
-debug.print: (0, 55_575_377)
+debug.print: (0, 51_380_880)
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/bench/ok/palindrome.drun-run.ok b/test/bench/ok/palindrome.drun-run.ok
index 6356d04b55b..4861a7b69fe 100644
--- a/test/bench/ok/palindrome.drun-run.ok
+++ b/test/bench/ok/palindrome.drun-run.ok
@@ -1,9 +1,9 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
-debug.print: (true, +1_464, 19_881)
-debug.print: (false, +1_464, 18_808)
-debug.print: (false, +1_464, 19_858)
-debug.print: (true, +1_092, 18_713)
-debug.print: (false, +1_080, 17_101)
-debug.print: (false, +1_080, 18_571)
+debug.print: (true, +1_188, 13_154)
+debug.print: (false, +1_188, 12_203)
+debug.print: (false, +1_188, 13_133)
+debug.print: (true, +868, 13_262)
+debug.print: (false, +868, 11_725)
+debug.print: (false, +868, 13_633)
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/bench/ok/region-mem.drun-run.ok b/test/bench/ok/region-mem.drun-run.ok
index 996b7e106eb..b99bc582edf 100644
--- a/test/bench/ok/region-mem.drun-run.ok
+++ b/test/bench/ok/region-mem.drun-run.ok
@@ -1,4 +1,4 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
-debug.print: {heap_diff = 0; instr_diff = 8_380_219_796}
+debug.print: {heap_diff = 0; instr_diff = 5_964_300_649}
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/bench/ok/region0-mem.drun-run.ok b/test/bench/ok/region0-mem.drun-run.ok
index e0f81d95884..bf527929218 100644
--- a/test/bench/ok/region0-mem.drun-run.ok
+++ b/test/bench/ok/region0-mem.drun-run.ok
@@ -1,4 +1,4 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
-debug.print: {heap_diff = 0; instr_diff = 8_279_556_500}
+debug.print: {heap_diff = 0; instr_diff = 6_392_119_657}
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/bench/ok/stable-mem.drun-run.ok b/test/bench/ok/stable-mem.drun-run.ok
index b64bd840aaf..5e3eeec3bce 100644
--- a/test/bench/ok/stable-mem.drun-run.ok
+++ b/test/bench/ok/stable-mem.drun-run.ok
@@ -1,4 +1,4 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
-debug.print: {heap_diff = 0; instr_diff = 4_454_351_252}
+debug.print: {heap_diff = 0; instr_diff = 3_875_537_257}
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/ld/Makefile b/test/ld/Makefile
index b342e7e88ec..7e8bf4a3119 100644
--- a/test/ld/Makefile
+++ b/test/ld/Makefile
@@ -14,26 +14,26 @@ WASM_LD?=wasm-ld-13
 MO_LD?=../../src/mo-ld
 
 _out/%.lib.o: %.c | _out
-	$(WASM_CLANG) --compile -fPIC --target=wasm32-emscripten --optimize=3 \
-		-fno-builtin -Wall \
+	$(WASM_CLANG) --compile -fpic --target=wasm64-emscripten --optimize=3 \
+		-fno-builtin -ffreestanding \
 		$< --output $@
 
 _out/%.lib.wasm: _out/%.lib.o | _out
-	$(WASM_LD) --import-memory --shared --no-entry --gc-sections \
+	$(WASM_LD) -mwasm64 --import-memory --shared --no-entry --gc-sections \
 		--export=__wasm_call_ctors \
 		$< -o $@
 
 _out/%.base.wasm: %.wat | _out
-	wat2wasm --debug-names $< -o $@
+	wat2wasm --enable-memory64 --debug-names $< -o $@
 
 _out/%.linked.wasm: _out/%.base.wasm _out/%.lib.wasm
 	$(MO_LD) -b _out/$*.base.wasm -l _out/$*.lib.wasm -o _out/$*.linked.wasm
 
 _out/%.wat: _out/%.wasm
-	wasm2wat $< -o $@
+	wasm2wat --enable-memory64 $< -o $@
 
 _out/%.valid: _out/%.wasm
-	wasm-validate $< > $@ 2>&1 || true
+	wasm-validate --enable-memory64 $< > $@ 2>&1 || true
 
 
 include ../*.mk
diff --git a/test/ld/fun-ptr.c b/test/ld/fun-ptr.c
index 83d84a8ef9b..1b077e8ef6a 100644
--- a/test/ld/fun-ptr.c
+++ b/test/ld/fun-ptr.c
@@ -4,14 +4,20 @@ int f0(int x, int y)
     return x + y;
 }
 
+// Clang/LLVM bug for Wasm64: &f0 generates a 32-bit value that is incompatible to void*
+void* result_f1 = (void*)1; // &f0
+
 __attribute__ ((visibility("default")))
 int (*f1(void)) (int x, int y)
 {
-    return &f0;
+    return result_f1;
 }
 
+// Clang/LLVM bug for Wasm64: &f0 generates a 32-bit value that is incompatible to void*
+void* result_f2 = (void*)2; // &f1
+
 __attribute__ ((visibility("default")))
-void *f2()
+void* f2()
 {
-    return &f1;
+    return result_f2;
 }
diff --git a/test/ld/fun-ptr.wat b/test/ld/fun-ptr.wat
index 15d5b483ea8..929cdeeaa43 100644
--- a/test/ld/fun-ptr.wat
+++ b/test/ld/fun-ptr.wat
@@ -1,14 +1,17 @@
 (module
-  (type (;0;) (func (result i32)))
+  (type (;0;) (func (result i64)))
   (type (;1;) (func (param i32) (param i32) (result i32)))
-  (import "rts" "f2" (func $f2 (result i32)))
+  (type (;2;) (func (result i32)))
+  (import "rts" "f2" (func $f2 (result i64)))
   (table (;0;) 1 1 funcref)
-  (memory (;0;) 2)
-  (global $heap_base i32 (i32.const 65536))
+  (memory (;0;) i64 2)
+  (global $heap_base i64 (i64.const 65536))
   (export "__heap_base" (global $heap_base))
-  (func $call_imported (type 0)
+  (func $call_imported (type 2)
     call $f2
+    i32.wrap_i64
     call_indirect (type 0)
+    i32.wrap_i64
     i32.const 3
     i32.const 5
     call_indirect (type 1)))
diff --git a/test/ld/ok/fun-ptr.linked.wat.ok b/test/ld/ok/fun-ptr.linked.wat.ok
index 1dc8da14ab2..e950eabfc3a 100644
--- a/test/ld/ok/fun-ptr.linked.wat.ok
+++ b/test/ld/ok/fun-ptr.linked.wat.ok
@@ -1,30 +1,41 @@
 (module
-  (type (;0;) (func (result i32)))
+  (type (;0;) (func (result i64)))
   (type (;1;) (func (param i32 i32) (result i32)))
-  (type (;2;) (func))
-  (func $call_imported (type 0) (result i32)
+  (type (;2;) (func (result i32)))
+  (type (;3;) (func))
+  (func $call_imported (type 2) (result i32)
     call $f2
+    i32.wrap_i64
     call_indirect (type 0)
+    i32.wrap_i64
     i32.const 3
     i32.const 5
     call_indirect (type 1))
-  (func $__wasm_call_ctors (type 2)
+  (func $__wasm_call_ctors (type 3)
     call $__wasm_apply_data_relocs)
-  (func $__wasm_apply_data_relocs (type 2))
+  (func $__wasm_apply_data_relocs (type 3))
   (func $f0 (type 1) (param i32 i32) (result i32)
     local.get 1
     local.get 0
     i32.add)
-  (func $f1 (type 0) (result i32)
-    global.get 0)
-  (func $f2 (type 0) (result i32)
-    global.get 1)
-  (func $link_start (type 2)
+  (func $f1 (type 0) (result i64)
+    i64.const 65536
+    i64.const 0
+    i64.add
+    i64.load)
+  (func $f2 (type 0) (result i64)
+    i64.const 65536
+    i64.const 8
+    i64.add
+    i64.load)
+  (func $link_start (type 3)
+    i64.const 65536
+    i32.const 0
+    i32.const 16
+    memory.init 0
     call $__wasm_call_ctors)
-  (table (;0;) 3 3 funcref)
-  (memory (;0;) 2)
-  (global (;0;) i32 (i32.const 1))
-  (global (;1;) i32 (i32.const 2))
-  (global (;2;) i32 (i32.const 65536))
+  (table (;0;) 1 1 funcref)
+  (memory (;0;) i64 2)
+  (global (;0;) i64 (i64.const 65792))
   (start $link_start)
-  (elem (;0;) (i32.const 1) func $f0 $f1))
+  (data (;0;) "\01\00\00\00\00\00\00\00\02\00\00\00\00\00\00\00"))
diff --git a/test/ld/ok/representative.linked.wat.ok b/test/ld/ok/representative.linked.wat.ok
index f0cd781fd47..bcf3c073ec9 100644
--- a/test/ld/ok/representative.linked.wat.ok
+++ b/test/ld/ok/representative.linked.wat.ok
@@ -23,6 +23,6 @@
   (func $link_start (type 0)
     call $__wasm_call_ctors)
   (table (;0;) 0 0 funcref)
-  (memory (;0;) 2)
-  (global (;0;) i32 (i32.const 65536))
+  (memory (;0;) i64 2)
+  (global (;0;) i64 (i64.const 65536))
   (start $link_start))
diff --git a/test/ld/representative.c b/test/ld/representative.c
index 3d6990a0db8..fae7fc5e626 100644
--- a/test/ld/representative.c
+++ b/test/ld/representative.c
@@ -9,4 +9,3 @@ export int square(int i) {
   unresolved_import();
   return i * i;
 }
-
diff --git a/test/ld/representative.wat b/test/ld/representative.wat
index f2da5cb0f9c..a47428a2afb 100644
--- a/test/ld/representative.wat
+++ b/test/ld/representative.wat
@@ -4,8 +4,8 @@
   (import "rts" "square" (func $square (param i32) (result i32)))
   (import "rts" "not_yet_imported2" (func $not_yet_imported2))
   (table (;0;) 0 0 funcref)
-  (memory (;0;) 2)
-  (global $heap_base i32 (i32.const 65536))
+  (memory (;0;) i64 2)
+  (global $heap_base i64 (i64.const 65536))
   (export "__heap_base" (global $heap_base))
   (export "resolved_import" (func $resolved_export))
   (func $call_imported (type 0)
diff --git a/test/ld/wrong-type.c b/test/ld/wrong-type.c
index 7d4359154af..909400a22d4 100644
--- a/test/ld/wrong-type.c
+++ b/test/ld/wrong-type.c
@@ -1,3 +1,2 @@
 #define export __attribute__ ((visibility("default")))
 export int exported() { return 42; }
-
diff --git a/test/ld/wrong-type.wat b/test/ld/wrong-type.wat
index 7c01f9caaff..563f11565f3 100644
--- a/test/ld/wrong-type.wat
+++ b/test/ld/wrong-type.wat
@@ -2,8 +2,8 @@
   (type (;0;) (func))
   (import "rts" "exported" (func $exported (param i32)))
   (table (;0;) 0 0 funcref)
-  (memory (;0;) 2)
-  (global $heap_base i32 (i32.const 65536))
+  (memory (;0;) i64 2)
+  (global $heap_base i64 (i64.const 65536))
   (export "__heap_base" (global $heap_base))
   (func $call_imported (type 0)
     i32.const 42
diff --git a/test/random/Embedder.hs b/test/random/Embedder.hs
index 77c79fb2d84..a4695a56c75 100644
--- a/test/random/Embedder.hs
+++ b/test/random/Embedder.hs
@@ -36,7 +36,7 @@ addCompilerArgs (WasmTime _) = ("-wasi-system-api" :)
 addCompilerArgs Drun = id
 
 addEmbedderArgs Reference = id
-addEmbedderArgs (WasmTime _) = \args -> ["-C", "cache=n", "-W", "nan-canonicalization=y", "-W", "multi-memory", "-W", "bulk-memory"] <> args
+addEmbedderArgs (WasmTime _) = \args -> ["-C", "cache=n", "-W", "nan-canonicalization=y", "-W", "memory64", "-W", "multi-memory", "-W", "bulk-memory"] <> args
 addEmbedderArgs Drun = ("--extra-batches" :) . ("10" :)
 
 embedderInvocation :: Embedder -> [Text] -> [Text]
diff --git a/test/random/Main.hs b/test/random/Main.hs
index 25dfa003cd6..03b96286105 100644
--- a/test/random/Main.hs
+++ b/test/random/Main.hs
@@ -57,7 +57,7 @@ main = do
   let tests :: TestTree
       tests = testGroup "Motoko tests" . concat
                $ [ [arithProps, conversionProps, utf8Props, matchingProps] | good ]
-              <> [ [encodingProps] | goodDrun ]
+               <> [ [encodingProps] | goodDrun ]
 
   if not (good || goodDrun)
   then putStrLn "No embedder available for testing. Done..."
diff --git a/test/random/README.md b/test/random/README.md
index c5368ba2c9e..f4a5253b130 100644
--- a/test/random/README.md
+++ b/test/random/README.md
@@ -35,7 +35,7 @@ With this piece of information you can run the identical test locally,
 using the following `nix-build` invocation from your `motoko`
 directory:
 ``` shell
-$ nix-build -A tests --arg replay 232458
+$ nix-build -A tests.qc --arg replay 232458
 ```
 
 ## Running in `moc` and `wasm-interp`
diff --git a/test/run-drun-non-ci/actor64.mo b/test/run-drun-non-ci/actor64.mo
new file mode 100644
index 00000000000..6ca39e6750d
--- /dev/null
+++ b/test/run-drun-non-ci/actor64.mo
@@ -0,0 +1,66 @@
+// Only works with incremental GC. 
+// Use:
+// ```
+// export EXTRA_MOC_ARGS="--incremental-gc"
+// ../run.sh -d actor64.drun
+// ```
+import Prim "mo:prim";
+
+actor {
+    let MB = 1024 * 1024;
+    let GB = 1024 * MB;
+
+    Prim.debugPrint("Hello Wasm64 Motoko on IC!");
+
+    func trace() {
+        Prim.debugPrint("Heap size: " # debug_show (Prim.rts_heap_size() / GB) # " GB");
+    };
+
+    type Node = ?{ value : [var Nat]; next : Node };
+    var first : Node = null;
+
+    public func allocate() : async () {
+        let value = Prim.Array_init(128 * MB, 0);
+        first := ?{ value; next = first };
+        trace();
+    };
+};
+
+//SKIP run
+//SKIP run-low
+//SKIP run-ir
+// too slow on ic-ref-run:
+//SKIP comp-ref
+
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
+//CALL ingress allocate "DIDL\x00\x00"
diff --git a/test/run-drun-non-ci/memory-reserve-composite.drun b/test/run-drun-non-ci/memory-reserve-composite.drun
deleted file mode 100644
index 1da30f6357b..00000000000
--- a/test/run-drun-non-ci/memory-reserve-composite.drun
+++ /dev/null
@@ -1,5 +0,0 @@
-# SKIP ic-ref-run
-install $ID memory-reserve-composite/memory-reserve-composite.mo ""
-ingress $ID prepare1 "DIDL\x00\x00"
-ingress $ID prepare2 "DIDL\x00\x00"
-query $ID allocateInCompositeQuery "DIDL\x00\x00"
diff --git a/test/run-drun-non-ci/memory-reserve-composite/memory-reserve-composite.mo b/test/run-drun-non-ci/memory-reserve-composite/memory-reserve-composite.mo
deleted file mode 100644
index d1238e382f8..00000000000
--- a/test/run-drun-non-ci/memory-reserve-composite/memory-reserve-composite.mo
+++ /dev/null
@@ -1,39 +0,0 @@
-import Prim "mo:⛔";
-actor {
-    stable var stableData = Prim.Array_tabulate(1024 * 1024, func(index) { index });
-    var array0 : [var Nat] = [var];
-    var array1 : [var Nat] = [var];
-    var array2 : [var Nat] = [var];
-    var array3 : [var Nat] = [var];
-    Prim.debugPrint("Initialized " # debug_show (Prim.rts_memory_size()));
-
-    public func prepare1() : async () {
-        array0 := Prim.Array_init(256 * 1024 * 1024, 0); // 1GB
-        array1 := Prim.Array_init(256 * 1024 * 1024, 1); // 2GB
-        Prim.debugPrint("Prepared1 " # debug_show (Prim.rts_memory_size()));
-    };
-
-    public func prepare2() : async () {
-        array2 := Prim.Array_init(256 * 1024 * 1024, 2); // 3GB
-        array3 := Prim.Array_init(150 * 1024 * 1024, 3); // around 3.75GB
-        Prim.debugPrint("Prepared2 " # debug_show (Prim.rts_memory_size()));
-    };
-
-    public composite query func allocateInCompositeQuery() : async () {
-        ignore Prim.Array_init(50 * 1024 * 1024, 4);
-        Prim.debugPrint("Composite query call " # debug_show (Prim.rts_memory_size()));
-        assert (Prim.rts_memory_size() > 3840 * 1024 * 1024);
-        await nestedQuery();
-        ignore Prim.Array_init(5 * 1024 * 1024, 4);
-        Prim.debugPrint("Composite query callback " # debug_show (Prim.rts_memory_size()));
-        assert (Prim.rts_memory_size() > 3840 * 1024 * 1024);
-    };
-
-    public query func nestedQuery() : async () {
-        Prim.debugPrint("Nested query " # debug_show (Prim.rts_memory_size()));
-    };
-};
-
-//SKIP run
-//SKIP run-ir
-//SKIP run-low
diff --git a/test/run-drun-non-ci/memory-reserve-query.drun b/test/run-drun-non-ci/memory-reserve-query.drun
deleted file mode 100644
index f7956e7ab96..00000000000
--- a/test/run-drun-non-ci/memory-reserve-query.drun
+++ /dev/null
@@ -1,5 +0,0 @@
-# SKIP ic-ref-run
-install $ID memory-reserve-query/memory-reserve-query.mo ""
-ingress $ID prepare1 "DIDL\x00\x00"
-ingress $ID prepare2 "DIDL\x00\x00"
-query $ID allocateInQuery "DIDL\x00\x00"
diff --git a/test/run-drun-non-ci/memory-reserve-query/memory-reserve-query.mo b/test/run-drun-non-ci/memory-reserve-query/memory-reserve-query.mo
deleted file mode 100644
index b50b802cb6c..00000000000
--- a/test/run-drun-non-ci/memory-reserve-query/memory-reserve-query.mo
+++ /dev/null
@@ -1,31 +0,0 @@
-import Prim "mo:⛔";
-actor {
-    stable var stableData = Prim.Array_tabulate(1024 * 1024, func(index) { index });
-    var array0 : [var Nat] = [var];
-    var array1 : [var Nat] = [var];
-    var array2 : [var Nat] = [var];
-    var array3 : [var Nat] = [var];
-    Prim.debugPrint("Initialized " # debug_show (Prim.rts_memory_size()));
-
-    public func prepare1() : async () {
-        array0 := Prim.Array_init(256 * 1024 * 1024, 0); // 1GB
-        array1 := Prim.Array_init(256 * 1024 * 1024, 1); // 2GB
-        Prim.debugPrint("Prepared1 " # debug_show (Prim.rts_memory_size()));
-    };
-
-    public func prepare2() : async () {
-        array2 := Prim.Array_init(256 * 1024 * 1024, 2); // 3GB
-        array3 := Prim.Array_init(150 * 1024 * 1024, 3); // around 3.75GB
-        Prim.debugPrint("Prepared2 " # debug_show (Prim.rts_memory_size()));
-    };
-
-    public query func allocateInQuery() : async () {
-        ignore Prim.Array_init(50 * 1024 * 1024, 4);
-        Prim.debugPrint("Query call " # debug_show (Prim.rts_memory_size()));
-        assert (Prim.rts_memory_size() > 3840 * 1024 * 1024);
-    };
-};
-
-//SKIP run
-//SKIP run-ir
-//SKIP run-low
diff --git a/test/run-drun-non-ci/memory-reserve-update.drun b/test/run-drun-non-ci/memory-reserve-update.drun
deleted file mode 100644
index 00296d68dab..00000000000
--- a/test/run-drun-non-ci/memory-reserve-update.drun
+++ /dev/null
@@ -1,5 +0,0 @@
-# SKIP ic-ref-run
-install $ID memory-reserve-update/memory-reserve-update.mo ""
-ingress $ID prepare1 "DIDL\x00\x00"
-ingress $ID prepare2 "DIDL\x00\x00"
-ingress $ID allocateInUpdate "DIDL\x00\x00"
diff --git a/test/run-drun-non-ci/memory-reserve-update/memory-reserve-update.mo b/test/run-drun-non-ci/memory-reserve-update/memory-reserve-update.mo
deleted file mode 100644
index cc8f3aaeb38..00000000000
--- a/test/run-drun-non-ci/memory-reserve-update/memory-reserve-update.mo
+++ /dev/null
@@ -1,30 +0,0 @@
-import Prim "mo:⛔";
-actor {
-    stable var stableData = Prim.Array_tabulate(1024 * 1024, func(index) { index });
-    var array0 : [var Nat] = [var];
-    var array1 : [var Nat] = [var];
-    var array2 : [var Nat] = [var];
-    var array3 : [var Nat] = [var];
-    Prim.debugPrint("Initialized " # debug_show (Prim.rts_memory_size()));
-
-    public func prepare1() : async () {
-        array0 := Prim.Array_init(256 * 1024 * 1024, 0); // 1GB
-        array1 := Prim.Array_init(256 * 1024 * 1024, 1); // 2GB
-        Prim.debugPrint("Prepared1 " # debug_show (Prim.rts_memory_size()));
-    };
-
-    public func prepare2() : async () {
-        array2 := Prim.Array_init(256 * 1024 * 1024, 2); // 3GB
-        array3 := Prim.Array_init(150 * 1024 * 1024, 3); // around 3.75GB
-        Prim.debugPrint("Prepared2 " # debug_show (Prim.rts_memory_size()));
-    };
-
-    public func allocateInUpdate() : async () {
-        Prim.debugPrint("Update call " # debug_show (Prim.rts_memory_size()));
-        ignore Prim.Array_init(50 * 1024 * 1024, 4);
-    };
-};
-
-//SKIP run
-//SKIP run-ir
-//SKIP run-low
diff --git a/test/run-drun-non-ci/memory-reserve-upgrade.drun b/test/run-drun-non-ci/memory-reserve-upgrade.drun
deleted file mode 100644
index 77c3762f6d5..00000000000
--- a/test/run-drun-non-ci/memory-reserve-upgrade.drun
+++ /dev/null
@@ -1,5 +0,0 @@
-# SKIP ic-ref-run
-install $ID memory-reserve-upgrade/memory-reserve-upgrade.mo ""
-ingress $ID prepare1 "DIDL\x00\x00"
-ingress $ID prepare2 "DIDL\x00\x00"
-upgrade $ID memory-reserve-upgrade/memory-reserve-upgrade.mo ""
diff --git a/test/run-drun-non-ci/memory-reserve-upgrade/memory-reserve-upgrade.mo b/test/run-drun-non-ci/memory-reserve-upgrade/memory-reserve-upgrade.mo
deleted file mode 100644
index 85cef10b07c..00000000000
--- a/test/run-drun-non-ci/memory-reserve-upgrade/memory-reserve-upgrade.mo
+++ /dev/null
@@ -1,25 +0,0 @@
-import Prim "mo:⛔";
-actor {
-    stable var stableData = Prim.Array_tabulate(1024 * 1024, func(index) { index });
-    var array0 : [var Nat] = [var];
-    var array1 : [var Nat] = [var];
-    var array2 : [var Nat] = [var];
-    var array3 : [var Nat] = [var];
-    Prim.debugPrint("Initialized " # debug_show (Prim.rts_memory_size()));
-
-    public func prepare1() : async () {
-        array0 := Prim.Array_init(256 * 1024 * 1024, 0); // 1GB
-        array1 := Prim.Array_init(256 * 1024 * 1024, 1); // 2GB
-        Prim.debugPrint("Prepared1 " # debug_show (Prim.rts_memory_size()));
-    };
-
-    public func prepare2() : async () {
-        array2 := Prim.Array_init(256 * 1024 * 1024, 2); // 3GB
-        array3 := Prim.Array_init(150 * 1024 * 1024, 3); // around 3.75GB
-        Prim.debugPrint("Prepared2 " # debug_show (Prim.rts_memory_size()));
-    };
-};
-
-//SKIP run
-//SKIP run-ir
-//SKIP run-low
diff --git a/test/run-drun-non-ci/ok/actor64.drun-run.ok b/test/run-drun-non-ci/ok/actor64.drun-run.ok
new file mode 100644
index 00000000000..9a123c0edbf
--- /dev/null
+++ b/test/run-drun-non-ci/ok/actor64.drun-run.ok
@@ -0,0 +1,67 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+debug.print: Hello Wasm64 Motoko on IC!
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 1 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 2 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 3 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 4 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 5 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 6 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 7 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 8 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 9 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 10 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 11 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 12 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 13 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 14 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 15 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 16 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 17 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 18 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 19 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 20 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 21 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 22 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 23 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 24 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 25 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 26 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 27 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 28 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 29 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 30 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 31 GB
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Heap size: 32 GB
+ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/actor-class-mgmt-enhanced.mo b/test/run-drun/actor-class-mgmt-enhanced.mo
new file mode 100644
index 00000000000..17a82939001
--- /dev/null
+++ b/test/run-drun/actor-class-mgmt-enhanced.mo
@@ -0,0 +1,156 @@
+//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+import Prim "mo:⛔";
+import Cycles = "cycles/cycles";
+import Cs "actor-class-mgmt/C";
+
+actor a {
+  type Change_origin = {
+      #from_user : {
+          user_id : Principal;
+      };
+      #from_canister : {
+          canister_id : Principal;
+          canister_version : ?Nat64;
+      };
+  };
+
+  type Change_details = {
+      #creation : { controllers : [Principal] };
+      #code_uninstall;
+      #code_deployment : {
+          mode : { #install; #reinstall; #upgrade};
+          // module_hash : Blob; // introduces non-determinism when codegen improves
+      };
+      #controllers_change : {
+          controllers : [Principal];
+      };
+  };
+
+  type Change = {
+      // timestamp_nanos : Nat64; // just omit this
+      canister_version : Nat64;
+      origin : Change_origin;
+      details : Change_details;
+  };
+
+  let ic00 = actor "aaaaa-aa" :
+    actor {
+      create_canister : {
+        settings : ? {
+          controllers : ?[Principal];
+          compute_allocation: ?Nat;
+          memory_allocation: ?Nat;
+        freezing_threshold: ?Nat;
+       }
+     } -> async { canister_id : Principal };
+
+      canister_info : {
+          canister_id : Principal;
+          num_requested_changes : ?Nat64;
+      } -> async {
+          total_num_changes : Nat64;
+          recent_changes : [Change];
+          // module_hash : ?Blob;
+          controllers : [Principal];
+      };
+   };
+
+  let default_settings = { settings = null };
+  // same as default but explicit
+  let settings = { settings = ? {
+     controllers = null;
+     compute_allocation = null;
+     freezing_threshold = null;
+     memory_allocation = null;
+    };
+  };
+
+  type IncrementalStabilization = actor {
+    __motoko_stabilize_before_upgrade : () -> async ();
+    __motoko_destabilize_after_upgrade : () -> async ();
+  };
+
+  func useIncrementalStabilization(a : actor {}) : IncrementalStabilization {
+    actor (debug_show (Prim.principalOfActor(a))) : IncrementalStabilization;
+  };
+
+  public func go () : async () {
+    // To get lots of cycles in both drun and ic-ref-run
+    if (Cycles.balance() == 0)
+      await Cycles.provisional_top_up_actor(a, 100_000_000_000_000);
+
+    do {
+      Cycles.add(2_000_000_000_000);
+      let c0 = await
+        Cs.C (0, ?(Prim.principalOfActor a));
+      assert ({args = 0; upgrades = 0} == (await c0.observe()));
+
+      Cycles.add(2_000_000_000_000);
+      let c1 = await
+        (system Cs.C)(#new default_settings)(1, null);
+      assert ({args = 1; upgrades = 0} == (await c1.observe()));
+      assert (c1 != c0);
+
+      Cycles.add(2_000_000_000_000);
+      let c2 = await
+        (system Cs.C)(#new settings)(2, null);
+      assert ({args = 2; upgrades = 0} == (await c2.observe()));
+      assert (c2 != c1);
+
+      Cycles.add(2_000_000_000_000);
+      let {canister_id = p} = await
+         ic00.create_canister default_settings;
+      // no need to add cycles
+      let c3 = await
+        (system Cs.C)(#install p)(3, null);
+      assert ({args = 3; upgrades = 0} == (await c3.observe()));
+      assert (Prim.principalOfActor c3 == p);
+      assert (c3 != c2);
+
+      // no need to add cycles
+      // upgrade by using enhanced orthogonal persistence
+      let c4 = await
+        (system Cs.C)(#upgrade c3)(4, null);
+      assert ({args = 4; upgrades = 1} == (await c4.observe()));
+      assert (c4 == c3);
+
+      // upgrade by using graph-copy-based stabilization
+      await useIncrementalStabilization(c4).__motoko_stabilize_before_upgrade();
+      let c5 = await
+        (system Cs.C)(#upgrade c4)(5, null);
+      await useIncrementalStabilization(c5).__motoko_destabilize_after_upgrade();
+      assert ({args = 5; upgrades = 2} == (await c5.observe()));
+      assert (c5 == c4);
+
+      let c6 = await
+        (system Cs.C)(#upgrade_with_persistence { wasm_memory_persistence = #Keep ; canister = c5 })(6, null);
+      assert ({args = 6; upgrades = 3} == (await c6.observe()));
+      assert (c6 == c5);
+
+      // no need to add cycles
+      let c7 = await
+        (system Cs.C)(#reinstall c6)(7, null);
+      assert ({args = 7; upgrades = 0} == (await c7.observe()));
+      assert (c7 == c6);
+
+      // no need to add cycles
+      let c8 = await
+        (system Cs.C)(#upgrade_with_persistence { wasm_memory_persistence = #Replace ; canister = c7 })(8, null);
+      assert ({args = 8; upgrades = 0} == (await c8.observe()));
+      assert (c8 == c7);
+
+      let info = await ic00.canister_info {
+          canister_id = p;
+          num_requested_changes = ?4
+      };
+
+      Prim.debugPrint (debug_show info);
+    };
+  };
+
+}
+
+//CALL ingress go "DIDL\x00\x00"
+//SKIP run
+//SKIP run-ir
+//SKIP run-low
diff --git a/test/run-drun/actor-class-mgmt.mo b/test/run-drun/actor-class-mgmt.mo
index a3234d32557..8f11a38d773 100644
--- a/test/run-drun/actor-class-mgmt.mo
+++ b/test/run-drun/actor-class-mgmt.mo
@@ -105,22 +105,10 @@ actor a {
 
       // no need to add cycles
       let c5 = await
-        (system Cs.C)(#upgrade_with_persistence { wasm_memory_persistence = #Keep ; canister = c4 })(5, null);
-      assert ({args = 5; upgrades = 2} == (await c5.observe()));
+        (system Cs.C)(#reinstall c4)(5, null);
+      assert ({args = 5; upgrades = 0} == (await c5.observe()));
       assert (c5 == c4);
 
-      // no need to add cycles
-      let c6 = await
-        (system Cs.C)(#reinstall c5)(6, null);
-      assert ({args = 6; upgrades = 0} == (await c6.observe()));
-      assert (c6 == c5);
-
-      // no need to add cycles
-      let c7 = await
-        (system Cs.C)(#upgrade_with_persistence { wasm_memory_persistence = #Replace ; canister = c6 })(7, null);
-      assert ({args = 7; upgrades = 0} == (await c7.observe()));
-      assert (c7 == c6);
-
       let info = await ic00.canister_info {
           canister_id = p;
           num_requested_changes = ?4
diff --git a/test/run-drun/any-stabilization.drun b/test/run-drun/any-stabilization.drun
new file mode 100644
index 00000000000..57e1298eeec
--- /dev/null
+++ b/test/run-drun/any-stabilization.drun
@@ -0,0 +1,11 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+# SKIP ic-ref-run
+install $ID any-stabilization/version0.mo ""
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x01\x7d\x0F"
+upgrade $ID any-stabilization/version0.mo ""
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x01\x7d\x0F"
+upgrade $ID any-stabilization/version1.mo ""
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x01\x7d\x0F"
+upgrade $ID any-stabilization/version1.mo ""
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x01\x7d\x0F"
+upgrade $ID any-stabilization/version2.mo ""
diff --git a/test/run-drun/any-stabilization/version0.mo b/test/run-drun/any-stabilization/version0.mo
new file mode 100644
index 00000000000..abfec2de7c4
--- /dev/null
+++ b/test/run-drun/any-stabilization/version0.mo
@@ -0,0 +1,3 @@
+actor {
+   stable var value : Any = 123;
+};
diff --git a/test/run-drun/any-stabilization/version1.mo b/test/run-drun/any-stabilization/version1.mo
new file mode 100644
index 00000000000..5a85d0618c3
--- /dev/null
+++ b/test/run-drun/any-stabilization/version1.mo
@@ -0,0 +1,3 @@
+actor {
+   stable var value : Any = "TEST";
+};
diff --git a/test/run-drun/any-stabilization/version2.mo b/test/run-drun/any-stabilization/version2.mo
new file mode 100644
index 00000000000..58d2382656c
--- /dev/null
+++ b/test/run-drun/any-stabilization/version2.mo
@@ -0,0 +1,3 @@
+actor {
+   stable var value = "TEST";
+};
diff --git a/test/run-drun/any-upgrades.drun b/test/run-drun/any-upgrades.drun
index 0417a9cc91c..294c2e4ae1e 100644
--- a/test/run-drun/any-upgrades.drun
+++ b/test/run-drun/any-upgrades.drun
@@ -1,3 +1,4 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
 # SKIP ic-ref-run
 install $ID any-upgrades/version0.mo ""
 upgrade $ID any-upgrades/version0.mo ""
diff --git a/test/run-drun/array-stabilization.drun b/test/run-drun/array-stabilization.drun
new file mode 100644
index 00000000000..22af3de7f8e
--- /dev/null
+++ b/test/run-drun/array-stabilization.drun
@@ -0,0 +1,29 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+# SKIP ic-ref-run
+install $ID array-stabilization/version0.mo ""
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID array-stabilization/version0.mo ""
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID array-stabilization/version1.mo ""
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID array-stabilization/version1.mo ""
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID array-stabilization/version2.mo ""
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID array-stabilization/version3.mo ""
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID array-stabilization/version4.mo ""
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID array-stabilization/version4.mo ""
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID array-stabilization/version0.mo ""
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID array-stabilization/version0.mo ""
+ingress $ID print "DIDL\x00\x00"
diff --git a/test/run-drun/array-stabilization/version0.mo b/test/run-drun/array-stabilization/version0.mo
new file mode 100644
index 00000000000..a6f4dee2e9d
--- /dev/null
+++ b/test/run-drun/array-stabilization/version0.mo
@@ -0,0 +1,16 @@
+import Prim "mo:prim";
+
+// Compatible upgrade
+actor {
+    type RecursiveArray = ?[RecursiveArray];
+
+    stable var simpleArray = [var 1.0, 2.0, 3.0, 4.0];
+    stable var nestedArray = [[1, 2, 3], [4, 5, 6]];
+    stable var recursiveArray = ?[null, ?[], ?[?[null, null], null]] : RecursiveArray;
+
+    public func print() : async () {
+        Prim.debugPrint(debug_show (simpleArray));
+        Prim.debugPrint(debug_show (nestedArray));
+        Prim.debugPrint(debug_show (recursiveArray));
+    };
+};
diff --git a/test/run-drun/array-stabilization/version1.mo b/test/run-drun/array-stabilization/version1.mo
new file mode 100644
index 00000000000..2798f73c73e
--- /dev/null
+++ b/test/run-drun/array-stabilization/version1.mo
@@ -0,0 +1,16 @@
+import Prim "mo:prim";
+
+// Compatible upgrade
+actor {
+    type RecursiveArray = ?[RecursiveArray];
+
+    stable var simpleArray: [var Float] = [var];
+    stable var nestedArray: [[Nat]] = [];
+    stable var recursiveArray = ?[] : RecursiveArray;
+
+    public func print() : async () {
+        Prim.debugPrint(debug_show (simpleArray));
+        Prim.debugPrint(debug_show (nestedArray));
+        Prim.debugPrint(debug_show (recursiveArray));
+    };
+};
diff --git a/test/run-drun/array-stabilization/version2.mo b/test/run-drun/array-stabilization/version2.mo
new file mode 100644
index 00000000000..28f951e4100
--- /dev/null
+++ b/test/run-drun/array-stabilization/version2.mo
@@ -0,0 +1,16 @@
+import Prim "mo:prim";
+
+// Incompatible upgrade
+actor {
+    type RecursiveArray = ?[RecursiveArray];
+
+    stable var simpleArray: [Float] = [];
+    stable var nestedArray: [Nat] = [];
+    stable var recursiveArray = ?[] : RecursiveArray;
+
+    public func print() : async () {
+        Prim.debugPrint(debug_show (simpleArray));
+        Prim.debugPrint(debug_show (nestedArray));
+        Prim.debugPrint(debug_show (recursiveArray));
+    };
+};
diff --git a/test/run-drun/array-stabilization/version3.mo b/test/run-drun/array-stabilization/version3.mo
new file mode 100644
index 00000000000..2c20e0e875f
--- /dev/null
+++ b/test/run-drun/array-stabilization/version3.mo
@@ -0,0 +1,16 @@
+import Prim "mo:prim";
+
+// Incompatible upgrade
+actor {
+    type RecursiveArray = ?[RecursiveArray];
+
+    stable var simpleArray: [var Float] = [var];
+    stable var nestedArray: [Nat] = [];
+    stable var recursiveArray = ?[] : RecursiveArray;
+
+    public func print() : async () {
+        Prim.debugPrint(debug_show (simpleArray));
+        Prim.debugPrint(debug_show (nestedArray));
+        Prim.debugPrint(debug_show (recursiveArray));
+    };
+};
diff --git a/test/run-drun/array-stabilization/version4.mo b/test/run-drun/array-stabilization/version4.mo
new file mode 100644
index 00000000000..3bd43d15b77
--- /dev/null
+++ b/test/run-drun/array-stabilization/version4.mo
@@ -0,0 +1,15 @@
+import Prim "mo:prim";
+
+// Compatible upgrade
+actor {
+    type ArrayStructure = ?[AliasName];
+    type AliasName = ArrayStructure;
+
+    stable var simpleArray: [var Float] = [var];
+    stable var recursiveArray = ?[] : ArrayStructure;
+
+    public func print() : async () {
+        Prim.debugPrint(debug_show (simpleArray));
+        Prim.debugPrint(debug_show (recursiveArray));
+    };
+};
diff --git a/test/run-drun/array-upgrades.drun b/test/run-drun/array-upgrades.drun
index acdc7558306..97ae0b98a85 100644
--- a/test/run-drun/array-upgrades.drun
+++ b/test/run-drun/array-upgrades.drun
@@ -1,3 +1,4 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
 # SKIP ic-ref-run
 install $ID array-upgrades/version0.mo ""
 ingress $ID print "DIDL\x00\x00"
diff --git a/test/run-drun/blob-array-mismatch.drun b/test/run-drun/blob-array-mismatch.drun
index 140a273175a..d0adf02e7ce 100644
--- a/test/run-drun/blob-array-mismatch.drun
+++ b/test/run-drun/blob-array-mismatch.drun
@@ -1,3 +1,4 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
 install $ID blob-array-mismatch/version0.mo ""
 upgrade $ID blob-array-mismatch/version0.mo ""
 upgrade $ID blob-array-mismatch/version1.mo ""
diff --git a/test/run-drun/blob-stabilization.mo b/test/run-drun/blob-stabilization.mo
new file mode 100644
index 00000000000..1397984bca9
--- /dev/null
+++ b/test/run-drun/blob-stabilization.mo
@@ -0,0 +1,24 @@
+//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+import Prim "mo:prim";
+
+actor {
+    let blobSize = 32 * 1024 * 1024;
+    let stablePageSize = 64 * 1024;
+    ignore Prim.stableMemoryGrow(Prim.natToNat64(blobSize / stablePageSize));
+
+    stable let blob = Prim.stableMemoryLoadBlob(0, blobSize);
+    stable let small = (123_456_789_123_456_789, "TEST");
+
+    public query func check() : async () {
+        assert (blob.size() == blobSize);
+        assert (small == (123_456_789_123_456_789, "TEST"));
+    };
+};
+//SKIP run
+//SKIP run-ir
+//SKIP run-low
+//CALL ingress check "DIDL\x00\x00"
+//CALL ingress __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+//CALL upgrade
+//CALL ingress check "DIDL\x00\x00"
+
diff --git a/test/run-drun/blob-upgrade.mo b/test/run-drun/blob-upgrade.mo
new file mode 100644
index 00000000000..c606e0d7783
--- /dev/null
+++ b/test/run-drun/blob-upgrade.mo
@@ -0,0 +1,22 @@
+import Prim "mo:prim";
+
+actor {
+    let blobSize = 32 * 1024 * 1024;
+    let stablePageSize = 64 * 1024;
+    ignore Prim.stableMemoryGrow(Prim.natToNat64(blobSize / stablePageSize));
+
+    stable let blob = Prim.stableMemoryLoadBlob(0, blobSize);
+    stable let small = (123_456_789_123_456_789, "TEST");
+
+    public query func check() : async () {
+        assert (blob.size() == blobSize);
+        assert (small == (123_456_789_123_456_789, "TEST"));
+    };
+};
+//SKIP run
+//SKIP run-ir
+//SKIP run-low
+//CALL ingress check "DIDL\x00\x00"
+//CALL upgrade
+//CALL ingress check "DIDL\x00\x00"
+
diff --git a/test/run-drun/class-import.mo b/test/run-drun/class-import.mo
index 7ce67345fb8..e69e6d37ee9 100644
--- a/test/run-drun/class-import.mo
+++ b/test/run-drun/class-import.mo
@@ -29,7 +29,7 @@ actor a {
    // test non-trapping install
    try {
      Cycles.add(2_000_000_000_000);
-     let trap : M3.Trap = await M3.Trap(false);
+     let _trap : M3.Trap = await M3.Trap(false);
    }
    catch _ {
      assert false;
@@ -38,7 +38,7 @@ actor a {
    // test trapping install
    try {
      Cycles.add(2_000_000_000_000);
-     let trap : M3.Trap = await M3.Trap(true);
+     let _trap : M3.Trap = await M3.Trap(true);
      assert false;
    }
    catch _ {
diff --git a/test/run-drun/destabilization-crash.drun b/test/run-drun/destabilization-crash.drun
new file mode 100644
index 00000000000..c68d0da438e
--- /dev/null
+++ b/test/run-drun/destabilization-crash.drun
@@ -0,0 +1,4 @@
+# CLASSICAL-PERSISTENCE-ONLY
+# SKIP ic-ref-run
+install $ID destabilization-crash/destabilization-crash.mo ""
+upgrade $ID destabilization-crash/destabilization-crash.mo ""
diff --git a/test/run-drun/destabilization-crash/destabilization-crash.mo b/test/run-drun/destabilization-crash/destabilization-crash.mo
new file mode 100644
index 00000000000..5f3272e5ab2
--- /dev/null
+++ b/test/run-drun/destabilization-crash/destabilization-crash.mo
@@ -0,0 +1,44 @@
+import Prim = "mo:prim";
+
+// test destabilization of stable variables, without rts stack overflow
+actor a {
+
+   stable let x = Prim.stableMemoryGrow(1);
+   assert Prim.stableMemorySize() == 1;
+
+   type List = ?(T, List);
+
+   stable var map : List<(Blob, Blob)> = null;
+
+   var count : Nat32 = 0;
+
+   func fillMB(mb : Nat) : () {
+     var c = 1024;
+     while (c > 0) {
+       count += 1;
+       Prim.stableMemoryStoreNat32(0, count);
+       var k = Prim.stableMemoryLoadBlob(0, 32);
+       var v = Prim.stableMemoryLoadBlob(0, 65536);
+       map := ?((k,v), map);
+       c -= 1;
+     };
+     if (Prim.rts_heap_size() < mb * 1024 * 1024) {
+      // Difference between incremental and non-incremental GC (due to different object header lengths).
+      let toleranceMB = 4;
+      Prim.debugPrint(debug_show({heap_MB = Prim.rts_heap_size()/1024/1024/toleranceMB*toleranceMB}));
+      fillMB(mb);
+     }
+   };
+
+   system func preupgrade() {
+     Prim.debugPrint "preupgrade!";
+     fillMB(500); // Reduce because `drun` otherwise fails with `Ingress message did not finish executing within 10000 batches`.
+   };
+
+   system func postupgrade() {
+     // if we get here, destabilization has succeeded
+     Prim.debugPrint "postupgrade!";
+     assert false; // trap to avoid saving state to disk
+   }
+
+}
diff --git a/test/run-drun/double-sharing-benchmark.mo b/test/run-drun/double-sharing-benchmark.mo
new file mode 100644
index 00000000000..cb248c49939
--- /dev/null
+++ b/test/run-drun/double-sharing-benchmark.mo
@@ -0,0 +1,41 @@
+//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+import Prim "mo:prim";
+
+actor {
+    type Node = {
+        value : T;
+        left : ?Node;
+        right : ?Node;
+    };
+
+    stable var root : ?Node = null;
+
+    func double(value : Nat) {
+        root := ?{
+            value;
+            left = root;
+            right = root;
+        };
+    };
+
+
+    public func run() : async () {
+        var size = 0;
+        while (size < 25) {
+            double(size);
+            size += 1;
+        };
+    };
+
+    system func postupgrade() {
+        Prim.debugPrint("Upgraded!");
+    };
+};
+
+//CALL ingress run "DIDL\x00\x00"
+//CALL ingress __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+//CALL upgrade ""
+
+//SKIP run-low
+//SKIP run
+//SKIP run-ir
diff --git a/test/run-drun/empty-actor.mo b/test/run-drun/empty-actor-classical.mo
similarity index 83%
rename from test/run-drun/empty-actor.mo
rename to test/run-drun/empty-actor-classical.mo
index 31684809b4f..564bf9d4ddd 100644
--- a/test/run-drun/empty-actor.mo
+++ b/test/run-drun/empty-actor-classical.mo
@@ -1,3 +1,4 @@
+//CLASSICAL-PERSISTENCE-ONLY
 actor {};
 
 // The prelude should not require any code (besides maybe a call to collect) at runtime
@@ -9,7 +10,7 @@ actor {};
 // CHECK-NEXT:    i32.const 0
 // CHECK-NEXT:    call 32
 // CHECK-NEXT:    global.set 4
-// CHECK-NEXT:    call $incremental_gc
+// CHECK-NEXT:    call ${{copying_gc|compacting_gc|generational_gc|incremental_gc}}
 // CHECK-NEXT:    i32.const 0
 // CHECK-NEXT:    call 32
 // CHECK-NEXT:    global.get 4
@@ -18,5 +19,3 @@ actor {};
 // CHECK-NEXT:    call $trans_state
 
 // CHECK:  (export "canister_init" (func $canister_init))
-
-
diff --git a/test/run-drun/empty-actor-enhanced.mo b/test/run-drun/empty-actor-enhanced.mo
new file mode 100644
index 00000000000..582ff41098a
--- /dev/null
+++ b/test/run-drun/empty-actor-enhanced.mo
@@ -0,0 +1,18 @@
+//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+actor {};
+
+// The prelude should not require any code (besides maybe a call to collect) at runtime
+// DON'TCHECK: (func $init (type 4))
+
+// CHECK:  (func $@motoko_async_destabilization (type 0)
+// CHECK:         call $trans_state10 
+// CHECK:         call $post_exp
+// CHECK:         call $start_gc_after_destabilization
+// CHECK:         call $trans_state4
+// CHECK:         call $@initialize_main_actor
+
+// CHECK:  (func $canister_init
+// CHECK-NEXT:    call $trans_state
+// CHECK-NEXT:    call $init
+
+// CHECK:  (export "canister_init" (func $canister_init))
diff --git a/test/run-drun/free-callbacks.mo b/test/run-drun/free-callbacks.mo
index ce026670046..c2805a01c12 100644
--- a/test/run-drun/free-callbacks.mo
+++ b/test/run-drun/free-callbacks.mo
@@ -29,7 +29,7 @@ actor a {
     // Checks that GC correctly discards or retains the arrays.
     // Using --forced-gc and allowing young collection for generational GC.
     // It allows for some wiggle room
-    let reserve = 9_000;
+    let reserve = 20_000;
     assert (+s1-s0 < reserve);
     assert (+s2-s0 > 4 * +length - reserve);
   };
diff --git a/test/run-drun/function-stabilization.drun b/test/run-drun/function-stabilization.drun
new file mode 100644
index 00000000000..035d90993f2
--- /dev/null
+++ b/test/run-drun/function-stabilization.drun
@@ -0,0 +1,16 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+install $ID function-stabilization/version0.mo ""
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x01\x7d\x0F"
+upgrade $ID function-stabilization/version0.mo ""
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x01\x7d\x0F"
+upgrade $ID function-stabilization/version1.mo ""
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x01\x7d\x0F"
+upgrade $ID function-stabilization/version1.mo ""
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x01\x7d\x0F"
+upgrade $ID function-stabilization/version2.mo ""
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x01\x7d\x0F"
+upgrade $ID function-stabilization/version3.mo ""
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x01\x7d\x0F"
+upgrade $ID function-stabilization/version4.mo ""
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x01\x7d\x0F"
+upgrade $ID function-stabilization/version1.mo ""
diff --git a/test/run-drun/function-stabilization/version0.mo b/test/run-drun/function-stabilization/version0.mo
new file mode 100644
index 00000000000..6e55e9c0b4c
--- /dev/null
+++ b/test/run-drun/function-stabilization/version0.mo
@@ -0,0 +1,14 @@
+actor {
+   public shared query func f0() : async () { loop {} };
+   stable let x0 = f0;
+
+   public shared composite query func f1(_ : Nat, _ : Bool) : async (Nat, Bool) {
+      loop {};
+   };
+   stable let x1 = f1;
+
+   public shared func f2(_ : {#one; #two}, _ : { oldField : Nat }) : async { oldField : Text } {
+      loop {};
+   };
+   stable let x2 = f2;
+};
diff --git a/test/run-drun/function-stabilization/version1.mo b/test/run-drun/function-stabilization/version1.mo
new file mode 100644
index 00000000000..c951177bfbb
--- /dev/null
+++ b/test/run-drun/function-stabilization/version1.mo
@@ -0,0 +1,14 @@
+actor {
+   public shared query func f0() : async () { loop {} };
+   stable let x0 = f0;
+
+   public shared composite query func f1(_ : Nat, _ : Bool) : async (Nat, Bool) {
+      loop {};
+   };
+   stable let x1 = f1;
+
+   public shared func f2(_ : {#one}, _ : { oldField : Nat; newField: Text }) : async { } {
+      loop {};
+   };
+   stable let x2 = f2;
+};
diff --git a/test/run-drun/function-stabilization/version2.mo b/test/run-drun/function-stabilization/version2.mo
new file mode 100644
index 00000000000..2f1da5cc8b7
--- /dev/null
+++ b/test/run-drun/function-stabilization/version2.mo
@@ -0,0 +1,6 @@
+actor {
+   public shared func f2(_ : {#one; #three}, _ : { oldField : Nat; newField: Text }) : async { } {
+      loop {};
+   };
+   stable let x2 = f2;
+};
diff --git a/test/run-drun/function-stabilization/version3.mo b/test/run-drun/function-stabilization/version3.mo
new file mode 100644
index 00000000000..6fe5a3ac75c
--- /dev/null
+++ b/test/run-drun/function-stabilization/version3.mo
@@ -0,0 +1,6 @@
+actor {
+   public shared func f2(_ : {#one}, _ : { newField: Text }) : async { } {
+      loop {};
+   };
+   stable let x2 = f2;
+};
diff --git a/test/run-drun/function-stabilization/version4.mo b/test/run-drun/function-stabilization/version4.mo
new file mode 100644
index 00000000000..0503260205c
--- /dev/null
+++ b/test/run-drun/function-stabilization/version4.mo
@@ -0,0 +1,6 @@
+actor {
+   public shared func f2(_ : {#one}, _ : { oldField : Nat; newField: Text }) : async { newField: Bool } {
+      loop {};
+   };
+   stable let x2 = f2;
+};
diff --git a/test/run-drun/function-upgrades.drun b/test/run-drun/function-upgrades.drun
index cd83b4859e0..4b93b14ba55 100644
--- a/test/run-drun/function-upgrades.drun
+++ b/test/run-drun/function-upgrades.drun
@@ -1,3 +1,4 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
 install $ID function-upgrades/version0.mo ""
 upgrade $ID function-upgrades/version0.mo ""
 upgrade $ID function-upgrades/version1.mo ""
diff --git a/test/run-drun/gc-trigger.mo b/test/run-drun/gc-trigger.mo
index 8ffe61788c8..fc39006a963 100644
--- a/test/run-drun/gc-trigger.mo
+++ b/test/run-drun/gc-trigger.mo
@@ -2,10 +2,11 @@
 import Prim "mo:prim";
 
 actor {
-    let retained = Prim.Array_init(5 * 1024 * 1024, 0);
+    let retained = Prim.Array_init(6 * 1024 * 1024, 0);
     // GC is triggered during initialization
 
-    var heapSizeWithGarbage = 0;
+    let heapSizeWithoutGarbage = Prim.rts_heap_size();
+    var heapSizeWithGarbage = heapSizeWithoutGarbage;
 
     public func createGarbage(): async() {
         ignore Prim.Array_init(1024 * 1024, 0);
@@ -15,12 +16,13 @@ actor {
     };
 
     public query func checkBeforeGC(): async() {
-        assert(Prim.rts_heap_size() >= heapSizeWithGarbage);
+        assert(Prim.rts_heap_size() >= heapSizeWithoutGarbage);
     };
 
     public query func checkAfterGC(): async() {
         assert(retained.size() > 0); // ensures that the array is not collected
-        assert(Prim.rts_heap_size() < heapSizeWithGarbage);
+        let tolerance = 100_000;
+        assert(Prim.rts_heap_size() < heapSizeWithGarbage + tolerance);
     };
 };
 //SKIP run
diff --git a/test/run-drun/hashBlob.mo b/test/run-drun/hashBlob.mo
new file mode 100644
index 00000000000..63e8a028cc1
--- /dev/null
+++ b/test/run-drun/hashBlob.mo
@@ -0,0 +1,12 @@
+import Prim "mo:⛔";
+
+actor a {
+  public shared ({ caller }) func test() : async Nat32 {
+    let value = Prim.hashBlob(Prim.blobOfPrincipal caller);
+    Prim.debugPrint(debug_show (value));
+    Prim.debugPrint(debug_show (value + 1));
+    value;
+  };
+};
+
+ignore a.test(); //OR-CALL ingress test 0x4449444C0000
diff --git a/test/run-drun/incremental-actor-class-stabilization.mo b/test/run-drun/incremental-actor-class-stabilization.mo
new file mode 100644
index 00000000000..eec2ab594f9
--- /dev/null
+++ b/test/run-drun/incremental-actor-class-stabilization.mo
@@ -0,0 +1,51 @@
+//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+//MOC-FLAG --stabilization-instruction-limit=10000
+
+import Prim "mo:⛔";
+import Cycles = "cycles/cycles";
+import TestActor "incremental-actor-class-stabilization/test-actor";
+
+actor a {
+  type IncrementalStabilization = actor {
+    __motoko_stabilize_before_upgrade : () -> async ();
+    __motoko_destabilize_after_upgrade : () -> async ();
+  };
+
+  func useIncrementalStabilization(a : actor {}) : IncrementalStabilization {
+    actor (debug_show (Prim.principalOfActor(a))) : IncrementalStabilization;
+  };
+
+  public func run() : async () {
+    if (Cycles.balance() == 0) {
+      await Cycles.provisional_top_up_actor(a, 100_000_000_000_000);
+    };
+
+    Cycles.add(2_000_000_000_000);
+    let testActor = await TestActor.TestActor(1234567890123456789012345678901234567890, "Test actor", Prim.Array_tabulate(100_000, func(index) { index }));
+    let testStabilization = useIncrementalStabilization(testActor);
+    await testActor.print();
+    await testStabilization.__motoko_stabilize_before_upgrade();
+    try {
+      await testActor.print();
+      assert false;
+    } catch (e) {
+      Prim.debugPrint(Prim.errorMessage(e));
+    };
+    Prim.debugPrint("Upgrade");
+    let upgraded = await (system TestActor.TestActor)(#upgrade testActor)(0, "", []);
+    assert (testActor == upgraded);
+    try {
+      await testActor.print();
+      assert false;
+    } catch (e) {
+      Prim.debugPrint(Prim.errorMessage(e));
+    };
+    await testStabilization.__motoko_destabilize_after_upgrade();
+    await testActor.print();
+  };
+};
+
+//CALL ingress run "DIDL\x00\x00"
+//SKIP run
+//SKIP run-ir
+//SKIP run-low
diff --git a/test/run-drun/incremental-actor-class-stabilization/test-actor.mo b/test/run-drun/incremental-actor-class-stabilization/test-actor.mo
new file mode 100644
index 00000000000..b3829bc411b
--- /dev/null
+++ b/test/run-drun/incremental-actor-class-stabilization/test-actor.mo
@@ -0,0 +1,26 @@
+import Prim "mo:⛔";
+
+actor class TestActor(number : Nat, text : Text, array : [Nat]) {
+  stable var stableNat = number;
+  stable var stableInt = -number;
+  stable var stableText = text;
+  stable var stableArray = array;
+
+  public func print() : async () {
+    Prim.debugPrint(debug_show (number));
+    Prim.debugPrint(debug_show (text));
+    Prim.debugPrint(debug_show (array.size()));
+    Prim.debugPrint(debug_show (stableNat));
+    Prim.debugPrint(debug_show (stableInt));
+    Prim.debugPrint(debug_show (stableText));
+    Prim.debugPrint(debug_show (stableArray.size()));
+  };
+
+  system func preupgrade() {
+    Prim.debugPrint("PRE-UPGRADE HOOK!");
+  };
+
+  system func postupgrade() {
+    Prim.debugPrint("POST-UPGRADE HOOK!");
+  };
+};
diff --git a/test/run-drun/incremental-stabilization.mo b/test/run-drun/incremental-stabilization.mo
new file mode 100644
index 00000000000..32c9cd020c6
--- /dev/null
+++ b/test/run-drun/incremental-stabilization.mo
@@ -0,0 +1,69 @@
+//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+//MOC-FLAG --stabilization-instruction-limit=10000
+
+import Prim "mo:prim";
+
+actor {
+    let flexibleNat = do {
+        Prim.debugPrint("Initialize flexible Nat");
+        1
+    };
+    stable var stableNat = do {
+        Prim.debugPrint("Initialize stable Nat");
+        12345678901234567890123456789012345678901234567890123456789012345678901234567890;
+    };
+    stable var stableInt = do {
+        Prim.debugPrint("Initialize stable Int");
+        -2345678901234567890123456789012345678901234567890123456789012345678901234567890;
+    };
+    stable var stableText = do {
+        Prim.debugPrint("Initialize stable text");
+        "Motoko incremental graph-copy-based upgrade test";
+    };
+    stable var stableArray = do {
+        Prim.debugPrint("Initialize stable array");
+        Prim.Array_tabulate(100, func (index) { index });
+    };
+    stable var stableObject = do {
+        Prim.debugPrint("Initialize stable object");
+        { stableNat; stableInt; stableText; stableArray; }
+    };
+    let flexibleText = do {
+        Prim.debugPrint("Initialize flexible text");
+        "Flexible text"
+    };
+    // To trigger incremental serialization/deserialization
+    stable var _largeStableArray = do {
+         Prim.Array_tabulate(100_000, func (index) { index });
+    };
+
+    public func print() : async () {
+        Prim.debugPrint(debug_show (flexibleNat));
+        Prim.debugPrint(debug_show (flexibleText));
+        Prim.debugPrint(debug_show (stableNat));
+        Prim.debugPrint(debug_show (stableInt));
+        Prim.debugPrint(debug_show (stableText));
+        Prim.debugPrint(debug_show (stableArray));
+        Prim.debugPrint(debug_show (stableObject));
+    };
+
+    system func preupgrade() {
+        Prim.debugPrint("PRE-UPGRADE HOOK!");
+    };
+
+    system func postupgrade() {
+        Prim.debugPrint("POST-UPGRADE HOOK!");
+    };
+};
+
+//CALL ingress print "DIDL\x00\x00"
+//CALL ingress __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+//CALL ingress print "DIDL\x00\x00"
+//CALL upgrade ""
+//CALL ingress print "DIDL\x00\x00"
+//CALL ingress __motoko_destabilize_after_upgrade "DIDL\x00\x00"
+//CALL ingress print "DIDL\x00\x00"
+
+//SKIP run
+//SKIP run-ir
+//SKIP run-low
diff --git a/test/run-drun/large-destabilization.drun b/test/run-drun/large-destabilization.drun
index 069817462cb..bb0712bcb5e 100644
--- a/test/run-drun/large-destabilization.drun
+++ b/test/run-drun/large-destabilization.drun
@@ -1,3 +1,4 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
 # SKIP ic-ref-run
 install $ID large-destabilization/large-destabilization.mo ""
 upgrade $ID large-destabilization/large-destabilization.mo ""
diff --git a/test/run-drun/map-mixed-upgrades.drun b/test/run-drun/map-mixed-upgrades.drun
new file mode 100644
index 00000000000..35d435cbdec
--- /dev/null
+++ b/test/run-drun/map-mixed-upgrades.drun
@@ -0,0 +1,36 @@
+# CLASSICAL-PERSISTENCE-ONLY
+# DEFAULT-GC-ONLY
+# SKIP ic-ref-run
+install $ID map-mixed-upgrades/map0.mo ""
+ingress $ID go "DIDL\x00\x00"
+upgrade $ID map-mixed-upgrades/map0.mo ""
+ingress $ID upgradeNodes "DIDL\x00\x00"
+# This succeeds:
+ingress $ID upgradeNodesReplaceMainMemory "DIDL\x00\x00"
+# This fails:
+ingress $ID upgradeNodesKeepMainMemory "DIDL\x00\x00"
+ingress $ID go "DIDL\x00\x00"
+upgrade $ID map-mixed-upgrades/map1.mo ""
+ingress $ID upgradeNodes "DIDL\x00\x00"
+ingress $ID go "DIDL\x00\x00"
+upgrade $ID map-mixed-upgrades/map1.mo ""
+ingress $ID upgradeNodes "DIDL\x00\x00"
+ingress $ID go "DIDL\x00\x00"
+# This succeeds:
+ingress $ID upgradeNodesKeepMainMemory "DIDL\x00\x00"
+ingress $ID go "DIDL\x00\x00"
+# Use stabilization and keep main memory:
+ingress $ID stabilizeNodes "DIDL\x00\x00"
+ingress $ID upgradeNodesKeepMainMemory "DIDL\x00\x00"
+ingress $ID go "DIDL\x00\x00"
+# Use stabilization, but reset main memory:
+ingress $ID stabilizeNodes "DIDL\x00\x00"
+ingress $ID upgradeNodesReplaceMainMemory "DIDL\x00\x00"
+ingress $ID go "DIDL\x00\x00"
+# This fails (downgrade EOP to classical persistence)
+upgrade $ID map-mixed-upgrades/map0.mo ""
+ingress $ID upgradeNodes "DIDL\x00\x00"
+ingress $ID go "DIDL\x00\x00"
+# This drops the memory, state will be lost:
+ingress $ID upgradeNodesReplaceMainMemory "DIDL\x00\x00"
+ingress $ID go "DIDL\x00\x00"
diff --git a/test/run-drun/map-mixed-upgrades/map0.mo b/test/run-drun/map-mixed-upgrades/map0.mo
new file mode 100644
index 00000000000..96ac9dea3a9
--- /dev/null
+++ b/test/run-drun/map-mixed-upgrades/map0.mo
@@ -0,0 +1,107 @@
+// Start with classical persistence, see `map-mixed-upgrades.drun`.
+import Prim "mo:⛔";
+import Cycles = "../cycles/cycles";
+import Lib "node0";
+
+// A naive, distributed map from Nat to Text.
+// Illustrates dynamic installation of imported actor classes.
+// Uses a fixed number of nodes, dynamically installed on demand
+// .. and upgraded with a call to upgradeNodes() (without data loss)
+
+actor a {
+
+  type Key = Nat;
+  type Value = Text;
+
+  // Number of Nodes
+  let n = 8;
+
+  type Node = Lib.Node;
+
+  stable let savedNodes : [var ?(actor{})] = Prim.Array_init(n, null);
+
+  let nodes : [var ?Node] = Prim.Array_init(n, null);
+
+  // Would be nice if these were both tail calls on the platform
+  public func lookup(k : Key) : async ?Value {
+    switch (nodes[k % n]) {
+      case null null;
+      case (?node) await node.lookup(k);
+    };
+  };
+
+  public func insert(k : Key, v : Value) : async () {
+    let i = k % n;
+    let node = switch (nodes[i]) {
+      case null {
+        Cycles.add(2_000_000_000_000);
+        let n = await Lib.Node(i); // dynamically install a new Node
+        nodes[i] := ?n;
+        n;
+      };
+      case (?node) node;
+    };
+    await node.insert(k, v);
+  };
+
+  system func preupgrade () {
+     for (i in nodes.keys()) {
+       savedNodes[i] := nodes[i];
+     }
+  };
+
+  public func upgradeNodes() : async () {
+    for(i in savedNodes.keys()) {
+       switch (savedNodes[i]) {
+         case null {};
+         case (?n) {
+           nodes[i] :=
+             ? (await (system Lib.Node)(#upgrade n)(i)); // upgrade!
+         }
+       }
+    }
+  };
+
+  public func upgradeNodesKeepMainMemory() : async () {
+    for(i in savedNodes.keys()) {
+       switch (savedNodes[i]) {
+         case null {};
+         case (?n) {
+           nodes[i] :=
+             ? (await (system Lib.Node)(#upgrade_with_persistence { wasm_memory_persistence = #Keep; canister = n })(i)); // upgrade!
+         }
+       }
+    }
+  };
+
+  public func upgradeNodesReplaceMainMemory() : async () {
+    for(i in savedNodes.keys()) {
+       switch (savedNodes[i]) {
+         case null {};
+         case (?n) {
+           nodes[i] :=
+             ? (await (system Lib.Node)(#upgrade_with_persistence { wasm_memory_persistence = #Replace; canister = n })(i)); // upgrade!
+         }
+       }
+    }
+  };
+
+  stable var k = 0;
+  // add 2 next keys on each call
+  public func go() : async () {
+    // To get lots of cycles in both drun and ic-ref-run
+    if (Cycles.balance() == 0)
+      await Cycles.provisional_top_up_actor(a, 100_000_000_000_000);
+
+    var i = 0;
+    while (i < 2) {
+      k += 1;
+      let t = debug_show(k);
+      assert (null == (await lookup(k)));
+      await insert(k, t);
+      assert (?t == (await lookup(k)));
+      i += 1;
+    };
+  };
+
+};
diff --git a/test/run-drun/map-mixed-upgrades/map1.mo b/test/run-drun/map-mixed-upgrades/map1.mo
new file mode 100644
index 00000000000..f85e7439064
--- /dev/null
+++ b/test/run-drun/map-mixed-upgrades/map1.mo
@@ -0,0 +1,143 @@
+//MOC-FLAG --enhanced-orthogonal-persistence
+import Prim "mo:⛔";
+import Cycles = "../cycles/cycles";
+import Lib "node1"; // new version!
+import Lib0 "node0"; // old version!
+
+// A naive, distributed map from Nat to Text.
+// Illustrates dynamic installation of imported actor classes.
+// Uses a fixed number of nodes, dynamically installed on demand
+// .. and upgraded with a call to upgradeNodes() (without data loss)
+
+actor a {
+
+  type Key = Nat;
+  type Value = Text;
+
+  // Number of Nodes
+  let n = 8;
+
+  type Node = Lib.Node;
+
+  stable let savedNodes : [var ?(actor{})] = Prim.Array_init(n, null);
+
+  let nodes : [var ?Node] = Prim.Array_init(n, null);
+
+  // Would be nice if these were both tail calls on the platform
+  public func lookup(k : Key) : async ?Value {
+    switch (nodes[k % n]) {
+      case null null;
+      case (?node) await node.lookup(k);
+    };
+  };
+
+  public func insert(k : Key, v : Value) : async () {
+    let i = k % n;
+    let node = switch (nodes[i]) {
+      case null {
+        Cycles.add(2_000_000_000_000);
+        let n = await Lib.Node(i); // dynamically install a new Node
+        nodes[i] := ?n;
+        n;
+      };
+      case (?node) node;
+    };
+    await node.insert(k, v);
+  };
+
+  public func remove(k : Key) : async () {
+    let i = k % n;
+    let node = switch (nodes[i]) {
+      case null { };
+      case (?node) {
+        await node.remove(k);
+      }
+    };
+  };
+
+  system func preupgrade () {
+     for (i in nodes.keys()) {
+       savedNodes[i] := nodes[i];
+     }
+  };
+
+  public func upgradeNodes() : async () {
+    for(i in savedNodes.keys()) {
+       switch (savedNodes[i]) {
+         case null {};
+         case (?n) {
+           nodes[i] :=
+             ? (await (system Lib.Node)(#upgrade n)(i)); // upgrade!
+         }
+       }
+    }
+  };
+
+  public func upgradeNodesKeepMainMemory() : async () {
+    for(i in savedNodes.keys()) {
+       switch (savedNodes[i]) {
+         case null {};
+         case (?n) {
+           nodes[i] :=
+             ? (await (system Lib.Node)(#upgrade_with_persistence { wasm_memory_persistence = #Keep; canister = n })(i)); // upgrade!
+         }
+       }
+    }
+  };
+
+  public func upgradeNodesReplaceMainMemory() : async () {
+    for(i in savedNodes.keys()) {
+       switch (savedNodes[i]) {
+         case null {};
+         case (?n) {
+           nodes[i] :=
+             ? (await (system Lib.Node)(#upgrade_with_persistence { wasm_memory_persistence = #Replace; canister = n })(i)); // upgrade!
+         }
+       }
+    }
+  };
+
+  type IncrementalStabilization = actor {
+    __motoko_stabilize_before_upgrade : () -> async ();
+    __motoko_destabilize_after_upgrade : () -> async ();
+  };
+
+  func useIncrementalStabilization(a : actor {}) : IncrementalStabilization {
+    actor (debug_show (Prim.principalOfActor(a))) : IncrementalStabilization;
+  };
+
+  public func stabilizeNodes() : async () {
+    for(i in savedNodes.keys()) {
+       switch (savedNodes[i]) {
+         case null {};
+         case (?n) {
+           let a = useIncrementalStabilization(n);
+           await a.__motoko_stabilize_before_upgrade();
+         }
+       }
+    }
+  };
+
+  stable var k = 0;
+  // add 2 next keys on each call
+  public func go() : async () {
+    // To get lots of cycles in both drun and ic-ref-run
+    if (Cycles.balance() == 0)
+      await Cycles.provisional_top_up_actor(a, 100_000_000_000_000);
+
+    var i = 0;
+    while (i < 2) {
+      k += 1;
+      let t = debug_show(k);
+      assert (null == (await lookup(k)));
+      await insert(k, t);
+      assert (?t == (await lookup(k)));
+      await remove(k);
+      assert (null == (await lookup(k)));
+      await insert(k, t);
+      assert (?t == (await lookup(k)));
+      i += 1;
+    };
+  };
+
+};
diff --git a/test/run-drun/map-mixed-upgrades/node0.mo b/test/run-drun/map-mixed-upgrades/node0.mo
new file mode 100644
index 00000000000..4816bed5ea8
--- /dev/null
+++ b/test/run-drun/map-mixed-upgrades/node0.mo
@@ -0,0 +1,42 @@
+import Prim "mo:⛔";
+actor class Node(i : Nat) {
+
+  stable var upgrades = 0;
+
+  type Key = Nat;
+  type Value = Text;
+
+  type List = ?(Key, Value, List);
+
+  stable var map : List = null;
+
+  Prim.debugPrint(debug_show {node = i; upgrades = upgrades; state = map});
+
+  public func lookup(k : Key) : async ? Value {
+    Prim.debugPrint(debug_show i # ": lookup " # debug_show k);
+    var m = map;
+    loop {
+      switch m {
+        case (?(k1, v, m1)) {
+          if (k == k1) { return ?v }
+          else {
+            m := m1;
+          };
+        };
+        case null {
+          return null;
+        }
+      }
+    }
+  };
+
+  system func preupgrade () {
+    upgrades += 1;
+  };
+
+  public func insert(k : Key, v : Value) : async () {
+    Prim.debugPrint(debug_show i # ": insert " # debug_show (k,v));
+    map := ?(k, v, map);
+  };
+
+};
diff --git a/test/run-drun/map-mixed-upgrades/node1.mo b/test/run-drun/map-mixed-upgrades/node1.mo
new file mode 100644
index 00000000000..97f0f81ccbf
--- /dev/null
+++ b/test/run-drun/map-mixed-upgrades/node1.mo
@@ -0,0 +1,64 @@
+import Prim "mo:⛔";
+actor class Node(i : Nat) {
+
+  stable var upgrades = 0;
+
+  type Key = Nat;
+  type Value = Text;
+
+  type List = ?(Key, Value, List);
+
+  stable var map : List = null;
+
+  Prim.debugPrint(debug_show {node = i; upgrades = upgrades; state = map});
+
+  public func lookup(k : Key) : async ? Value {
+    Prim.debugPrint(debug_show i # ": lookup " # debug_show k);
+    var m = map;
+    loop {
+      switch m {
+        case (?(k1, v, m1)) {
+          if (k == k1) { return ?v }
+          else {
+            m := m1;
+          };
+        };
+        case null {
+          return null;
+        }
+      }
+    }
+  };
+
+  system func preupgrade () {
+    upgrades += 1;
+  };
+
+  public func insert(k : Key, v : Value) : async () {
+    Prim.debugPrint(debug_show i # ": insert " # debug_show (k,v));
+    map := ?(k, v, map);
+  };
+
+  func rem(k : Key, l : List) : List {
+    switch l {
+      case (?(k1, v1, l1)) {
+        if (k == k1) {
+          rem(k, l1)
+        }
+        else {
+          ?(k1, v1, rem(k, l1))
+        };
+      };
+      case null {
+        return null;
+      }
+    }
+  };
+
+  public func remove(k : Key) : async () {
+    Prim.debugPrint(debug_show i # ": remove " # debug_show (k));
+    map := rem(k, map);
+  };
+
+
+};
diff --git a/test/run-drun/map-stabilization.drun b/test/run-drun/map-stabilization.drun
new file mode 100644
index 00000000000..b6aa1838639
--- /dev/null
+++ b/test/run-drun/map-stabilization.drun
@@ -0,0 +1,15 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+install $ID map-stabilization/map0.mo ""
+ingress $ID go "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID map-stabilization/map0.mo ""
+ingress $ID upgradeNodes "DIDL\x00\x00"
+ingress $ID go "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID map-stabilization/map1.mo ""
+ingress $ID upgradeNodes "DIDL\x00\x00"
+ingress $ID go "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID map-stabilization/map1.mo ""
+ingress $ID upgradeNodes "DIDL\x00\x00"
+ingress $ID go "DIDL\x00\x00"
diff --git a/test/run-drun/map-stabilization/map0.mo b/test/run-drun/map-stabilization/map0.mo
new file mode 100644
index 00000000000..afb067f7935
--- /dev/null
+++ b/test/run-drun/map-stabilization/map0.mo
@@ -0,0 +1,82 @@
+import Prim "mo:⛔";
+import Cycles = "../cycles/cycles";
+import Lib "node0";
+
+// A naive, distributed map from Nat to Text.
+// Illustrates dynamic installation of imported actor classes.
+// Uses a fixed number of nodes, dynamically installed on demand
+// .. and upgraded with a call to upgradeNodes() (without data loss)
+
+actor a {
+
+  type Key = Nat;
+  type Value = Text;
+
+  // Number of Nodes
+  let n = 8;
+
+  type Node = Lib.Node;
+
+  stable let savedNodes : [var ?(actor{})] = Prim.Array_init(n, null);
+
+  let nodes : [var ?Node] = Prim.Array_init(n, null);
+
+  // Would be nice if these were both tail calls on the platform
+  public func lookup(k : Key) : async ?Value {
+    switch (nodes[k % n]) {
+      case null null;
+      case (?node) await node.lookup(k);
+    };
+  };
+
+  public func insert(k : Key, v : Value) : async () {
+    let i = k % n;
+    let node = switch (nodes[i]) {
+      case null {
+        Cycles.add(2_000_000_000_000);
+        let n = await Lib.Node(i); // dynamically install a new Node
+        nodes[i] := ?n;
+        n;
+      };
+      case (?node) node;
+    };
+    await node.insert(k, v);
+  };
+
+  system func preupgrade () {
+     for (i in nodes.keys()) {
+       savedNodes[i] := nodes[i];
+     }
+  };
+
+  public func upgradeNodes() : async () {
+    for(i in savedNodes.keys()) {
+       switch (savedNodes[i]) {
+         case null {};
+         case (?n) {
+           nodes[i] :=
+             ? (await (system Lib.Node)(#upgrade n)(i)); // upgrade!
+         }
+       }
+    }
+  };
+
+  stable var k = 0;
+  // add 2 next keys on each call
+  public func go() : async () {
+    // To get lots of cycles in both drun and ic-ref-run
+    if (Cycles.balance() == 0)
+      await Cycles.provisional_top_up_actor(a, 100_000_000_000_000);
+
+    var i = 0;
+    while (i < 2) {
+      k += 1;
+      let t = debug_show(k);
+      assert (null == (await lookup(k)));
+      await insert(k, t);
+      assert (?t == (await lookup(k)));
+      i += 1;
+    };
+  };
+
+};
diff --git a/test/run-drun/map-stabilization/map1.mo b/test/run-drun/map-stabilization/map1.mo
new file mode 100644
index 00000000000..3356ce3d59d
--- /dev/null
+++ b/test/run-drun/map-stabilization/map1.mo
@@ -0,0 +1,96 @@
+import Prim "mo:⛔";
+import Cycles = "../cycles/cycles";
+import Lib "node1"; // new version!
+
+// A naive, distributed map from Nat to Text.
+// Illustrates dynamic installation of imported actor classes.
+// Uses a fixed number of nodes, dynamically installed on demand
+// .. and upgraded with a call to upgradeNodes() (without data loss)
+
+actor a {
+
+  type Key = Nat;
+  type Value = Text;
+
+  // Number of Nodes
+  let n = 8;
+
+  type Node = Lib.Node;
+
+  stable let savedNodes : [var ?(actor{})] = Prim.Array_init(n, null);
+
+  let nodes : [var ?Node] = Prim.Array_init(n, null);
+
+  // Would be nice if these were both tail calls on the platform
+  public func lookup(k : Key) : async ?Value {
+    switch (nodes[k % n]) {
+      case null null;
+      case (?node) await node.lookup(k);
+    };
+  };
+
+  public func insert(k : Key, v : Value) : async () {
+    let i = k % n;
+    let node = switch (nodes[i]) {
+      case null {
+        Cycles.add(2_000_000_000_000);
+        let n = await Lib.Node(i); // dynamically install a new Node
+        nodes[i] := ?n;
+        n;
+      };
+      case (?node) node;
+    };
+    await node.insert(k, v);
+  };
+
+  public func remove(k : Key) : async () {
+    let i = k % n;
+    let node = switch (nodes[i]) {
+      case null { };
+      case (?node) {
+        await node.remove(k);
+      }
+    };
+  };
+
+  system func preupgrade () {
+     for (i in nodes.keys()) {
+       savedNodes[i] := nodes[i];
+     }
+  };
+
+  public func upgradeNodes() : async () {
+    for(i in savedNodes.keys()) {
+       switch (savedNodes[i]) {
+         case null {};
+         case (?n) {
+           nodes[i] :=
+             ? (await (system Lib.Node)(#upgrade n)(i)); // upgrade!
+         }
+       }
+    }
+  };
+
+  stable var k = 0;
+  // add 2 next keys on each call
+  public func go() : async () {
+    // To get lots of cycles in both drun and ic-ref-run
+    if (Cycles.balance() == 0)
+      await Cycles.provisional_top_up_actor(a, 100_000_000_000_000);
+
+    var i = 0;
+    while (i < 2) {
+      k += 1;
+      let t = debug_show(k);
+      assert (null == (await lookup(k)));
+      await insert(k, t);
+      assert (?t == (await lookup(k)));
+      await remove(k);
+      assert (null == (await lookup(k)));
+      await insert(k, t);
+      assert (?t == (await lookup(k)));
+      i += 1;
+    };
+  };
+
+};
diff --git a/test/run-drun/map-stabilization/node0.mo b/test/run-drun/map-stabilization/node0.mo
new file mode 100644
index 00000000000..4816bed5ea8
--- /dev/null
+++ b/test/run-drun/map-stabilization/node0.mo
@@ -0,0 +1,42 @@
+import Prim "mo:⛔";
+actor class Node(i : Nat) {
+
+  stable var upgrades = 0;
+
+  type Key = Nat;
+  type Value = Text;
+
+  type List = ?(Key, Value, List);
+
+  stable var map : List = null;
+
+  Prim.debugPrint(debug_show {node = i; upgrades = upgrades; state = map});
+
+  public func lookup(k : Key) : async ? Value {
+    Prim.debugPrint(debug_show i # ": lookup " # debug_show k);
+    var m = map;
+    loop {
+      switch m {
+        case (?(k1, v, m1)) {
+          if (k == k1) { return ?v }
+          else {
+            m := m1;
+          };
+        };
+        case null {
+          return null;
+        }
+      }
+    }
+  };
+
+  system func preupgrade () {
+    upgrades += 1;
+  };
+
+  public func insert(k : Key, v : Value) : async () {
+    Prim.debugPrint(debug_show i # ": insert " # debug_show (k,v));
+    map := ?(k, v, map);
+  };
+
+};
diff --git a/test/run-drun/map-stabilization/node1.mo b/test/run-drun/map-stabilization/node1.mo
new file mode 100644
index 00000000000..97f0f81ccbf
--- /dev/null
+++ b/test/run-drun/map-stabilization/node1.mo
@@ -0,0 +1,64 @@
+import Prim "mo:⛔";
+actor class Node(i : Nat) {
+
+  stable var upgrades = 0;
+
+  type Key = Nat;
+  type Value = Text;
+
+  type List = ?(Key, Value, List);
+
+  stable var map : List = null;
+
+  Prim.debugPrint(debug_show {node = i; upgrades = upgrades; state = map});
+
+  public func lookup(k : Key) : async ? Value {
+    Prim.debugPrint(debug_show i # ": lookup " # debug_show k);
+    var m = map;
+    loop {
+      switch m {
+        case (?(k1, v, m1)) {
+          if (k == k1) { return ?v }
+          else {
+            m := m1;
+          };
+        };
+        case null {
+          return null;
+        }
+      }
+    }
+  };
+
+  system func preupgrade () {
+    upgrades += 1;
+  };
+
+  public func insert(k : Key, v : Value) : async () {
+    Prim.debugPrint(debug_show i # ": insert " # debug_show (k,v));
+    map := ?(k, v, map);
+  };
+
+  func rem(k : Key, l : List) : List {
+    switch l {
+      case (?(k1, v1, l1)) {
+        if (k == k1) {
+          rem(k, l1)
+        }
+        else {
+          ?(k1, v1, rem(k, l1))
+        };
+      };
+      case null {
+        return null;
+      }
+    }
+  };
+
+  public func remove(k : Key) : async () {
+    Prim.debugPrint(debug_show i # ": remove " # debug_show (k));
+    map := rem(k, map);
+  };
+
+
+};
diff --git a/test/run-drun/map-upgrades.drun b/test/run-drun/map-upgrades.drun
index 95fac3cb58d..db5be5e7bb8 100644
--- a/test/run-drun/map-upgrades.drun
+++ b/test/run-drun/map-upgrades.drun
@@ -15,4 +15,3 @@ ingress $ID go "DIDL\x00\x00"
 upgrade $ID map-upgrades/map1.mo ""
 ingress $ID upgradeNodes "DIDL\x00\x00"
 ingress $ID go "DIDL\x00\x00"
-
diff --git a/test/run-drun/max-stack-variant.mo b/test/run-drun/max-stack-variant.mo
index 2b0f8160473..a9e011df8e1 100644
--- a/test/run-drun/max-stack-variant.mo
+++ b/test/run-drun/max-stack-variant.mo
@@ -2,7 +2,7 @@
 import { errorMessage; debugPrint; setCandidLimits} = "mo:⛔";
 
 actor {
-    let expectedMinimumSize = 31_000;
+    let expectedMinimumSize = 29_000;
     setCandidLimits{ numerator = 0;
                              denominator = 1;
                              bias = 1_000_000 };
diff --git a/test/run-drun/migrate-candid-sharing.drun b/test/run-drun/migrate-candid-sharing.drun
new file mode 100644
index 00000000000..3731294c9cb
--- /dev/null
+++ b/test/run-drun/migrate-candid-sharing.drun
@@ -0,0 +1,7 @@
+# CLASSICAL-PERSISTENCE-ONLY
+# DEFAULT-GC-ONLY
+# SKIP ic-ref-run
+install $ID migrate-candid-sharing/old-stabilization.mo ""
+upgrade $ID migrate-candid-sharing/new-stabilization.mo ""
+upgrade $ID migrate-candid-sharing/new-stabilization.mo ""
+upgrade $ID migrate-candid-sharing/old-stabilization.mo ""
diff --git a/test/run-drun/migrate-candid-sharing/new-stabilization.mo b/test/run-drun/migrate-candid-sharing/new-stabilization.mo
new file mode 100644
index 00000000000..47d145d496c
--- /dev/null
+++ b/test/run-drun/migrate-candid-sharing/new-stabilization.mo
@@ -0,0 +1,13 @@
+//MOC-FLAG --enhanced-orthogonal-persistence
+import Prim "mo:prim";
+
+actor {
+   type Data = { field1 : Text; field2 : Nat; var field3: ?Data; };
+
+   stable var sharedObject : Data = { field1 = "Test"; field2 = 12345; var field3 = null };
+   sharedObject.field3 := ?sharedObject;
+
+   stable var array : [var Data] = Prim.Array_init(100, sharedObject);
+
+   Prim.debugPrint("INITIALIZED: " # debug_show (array.size()));
+};
diff --git a/test/run-drun/migrate-candid-sharing/old-stabilization.mo b/test/run-drun/migrate-candid-sharing/old-stabilization.mo
new file mode 100644
index 00000000000..07aecef0f65
--- /dev/null
+++ b/test/run-drun/migrate-candid-sharing/old-stabilization.mo
@@ -0,0 +1,13 @@
+// Uses classical persistence, see `migrate-candid-sharing.drun`.
+import Prim "mo:prim";
+
+actor {
+   type Data = { field1 : Text; field2 : Nat; var field3: ?Data; };
+
+   stable var sharedObject : Data = { field1 = "Test"; field2 = 12345; var field3 = null };
+   sharedObject.field3 := ?sharedObject;
+
+   stable var array : [var Data] = Prim.Array_init(100, sharedObject);
+
+   Prim.debugPrint("INITIALIZED: " # debug_show (array.size()));
+};
diff --git a/test/run-drun/migrate-stabilization.drun b/test/run-drun/migrate-stabilization.drun
index 884157d7a91..4a7365e2121 100644
--- a/test/run-drun/migrate-stabilization.drun
+++ b/test/run-drun/migrate-stabilization.drun
@@ -1,8 +1,10 @@
+# CLASSICAL-PERSISTENCE-ONLY
+# DEFAULT-GC-ONLY
 # SKIP ic-ref-run
-install $ID migrate-stabilization/old-stabilization.wasm ""
+install $ID migrate-stabilization/old-stabilization.mo ""
 ingress $ID populate "DIDL\x00\x00"
 ingress $ID print "DIDL\x00\x00"
-upgrade $ID migrate-stabilization/old-stabilization.wasm ""
+upgrade $ID migrate-stabilization/old-stabilization.mo ""
 ingress $ID populate "DIDL\x00\x00"
 ingress $ID print "DIDL\x00\x00"
 upgrade $ID migrate-stabilization/new-stabilization.mo ""
@@ -11,4 +13,4 @@ ingress $ID print "DIDL\x00\x00"
 upgrade $ID migrate-stabilization/new-stabilization.mo ""
 ingress $ID populate "DIDL\x00\x00"
 ingress $ID print "DIDL\x00\x00"
-upgrade $ID migrate-stabilization/old-stabilization.wasm ""
+upgrade $ID migrate-stabilization/old-stabilization.mo ""
diff --git a/test/run-drun/migrate-stabilization/new-stabilization.mo b/test/run-drun/migrate-stabilization/new-stabilization.mo
index c941993cb8f..5291a3c08ed 100644
--- a/test/run-drun/migrate-stabilization/new-stabilization.mo
+++ b/test/run-drun/migrate-stabilization/new-stabilization.mo
@@ -1,3 +1,4 @@
+//MOC-FLAG --enhanced-orthogonal-persistence
 import Prim "mo:prim";
 
 actor {
diff --git a/test/run-drun/migrate-stabilization/old-stabilization-source.txt b/test/run-drun/migrate-stabilization/old-stabilization-source.txt
deleted file mode 100644
index 1f243b469a0..00000000000
--- a/test/run-drun/migrate-stabilization/old-stabilization-source.txt
+++ /dev/null
@@ -1 +0,0 @@
-old-destabilization.wasm is compiled from `new-destabilzation.mo` with old `moc` compiler using `--sanity-checks`.
diff --git a/test/run-drun/migrate-stabilization/old-stabilization.mo b/test/run-drun/migrate-stabilization/old-stabilization.mo
new file mode 100644
index 00000000000..7ac4752eec2
--- /dev/null
+++ b/test/run-drun/migrate-stabilization/old-stabilization.mo
@@ -0,0 +1,51 @@
+// Uses classical persistence, see `migrate-stabilization.drun`.
+import Prim "mo:prim";
+
+actor {
+   type Node = ?{ key : K; value : V; var next : Node };
+   type List = {
+      var first : Node;
+      var last : Node;
+   };
+   stable let list : List = {
+      var first = null;
+      var last = null;
+   };
+   stable var counter = 0;
+
+   Prim.debugPrint("INITIALIZED: " # debug_show(counter));
+
+   func insert(list : List, key : K, value : V) {
+      let node : Node = ?{ key; value; var next = null };
+      switch (list.last) {
+         case null {
+            list.first := node;
+         };
+         case (?previous) {
+            previous.next := node;
+         };
+      };
+      list.last := node;
+   };
+
+   public func populate() : async () {
+      let limit = counter + 10;
+      while (counter < limit) {
+         insert(list, counter, debug_show (counter));
+         counter += 1;
+      };
+   };
+
+   public func print() : async () {
+      var current = list.first;
+      loop {
+         switch current {
+            case null return;
+            case (?node) {
+               Prim.debugPrint(debug_show (node.key) # " " # node.value);
+               current := node.next;
+            };
+         };
+      };
+   };
+};
diff --git a/test/run-drun/migrate-stabilization/old-stabilization.wasm b/test/run-drun/migrate-stabilization/old-stabilization.wasm
deleted file mode 100644
index 4ae5850831a..00000000000
Binary files a/test/run-drun/migrate-stabilization/old-stabilization.wasm and /dev/null differ
diff --git a/test/run-drun/migration-paths.drun b/test/run-drun/migration-paths.drun
new file mode 100644
index 00000000000..026f98ab1cd
--- /dev/null
+++ b/test/run-drun/migration-paths.drun
@@ -0,0 +1,26 @@
+# CLASSICAL-PERSISTENCE-ONLY
+# DEFAULT-GC-ONLY
+# SKIP ic-ref-run
+
+# Start with classical persistence
+install $ID migration-paths/old-installer.mo ""
+ingress $ID install "DIDL\x00\x00"
+ingress $ID upgrade "DIDL\x00\x00"
+ingress $ID upgrade_replace_main_memory "DIDL\x00\x00"
+# This fails:
+ingress $ID upgrade_keep_main_memory "DIDL\x00\x00"
+
+# Now, upgrade to enhanced orthogonal persistence
+upgrade $ID migration-paths/new-installer.mo ""
+# Try wrong upgrade option: RTS detects and handles this.
+ingress $ID upgrade_keep_main_memory "DIDL\x00\x00"
+# Initiate graph copy
+ingress $ID initiate_graph_copy "DIDL\x00\x00"
+# Discard EOP main memory, but retain state of graph copy
+ingress $ID upgrade_replace_main_memory "DIDL\x00\x00"
+# Initiate graph copy
+ingress $ID initiate_graph_copy "DIDL\x00\x00"
+ingress $ID upgrade_keep_main_memory "DIDL\x00\x00"
+
+# Try to downgrade. This will fail:
+upgrade $ID migration-paths/old-installer.mo ""
diff --git a/test/run-drun/migration-paths/new-installer.mo b/test/run-drun/migration-paths/new-installer.mo
new file mode 100644
index 00000000000..de73aae5e23
--- /dev/null
+++ b/test/run-drun/migration-paths/new-installer.mo
@@ -0,0 +1,57 @@
+//MOC-FLAG --enhanced-orthogonal-persistence
+import Prim "mo:⛔";
+import TestCanister "test_canister";
+
+actor {
+    stable var testCanister : ?TestCanister.TestCanister = null;
+
+    public func upgrade() : async () {
+        switch testCanister {
+            case null Prim.trap("null canister");
+            case (?canister) {
+                ignore await (system TestCanister.TestCanister)(#upgrade canister)();
+                Prim.debugPrint("Upgraded (default)");
+            };
+        };
+    };
+
+    public func upgrade_keep_main_memory() : async () {
+        switch testCanister {
+            case null Prim.trap("null canister");
+            case (?canister) {
+                ignore await (system TestCanister.TestCanister)(#upgrade_with_persistence { wasm_memory_persistence = #Keep; canister })();
+                Prim.debugPrint("Upgraded (keep main memory)");
+            };
+        };
+    };
+
+    public func upgrade_replace_main_memory() : async () {
+        switch testCanister {
+            case null Prim.trap("null canister");
+            case (?canister) {
+                ignore await (system TestCanister.TestCanister)(#upgrade_with_persistence { wasm_memory_persistence = #Replace; canister })();
+                Prim.debugPrint("Upgraded (replace main memory)");
+            };
+        };
+    };
+
+    type GraphCopy = actor {
+        __motoko_stabilize_before_upgrade : () -> async ();
+    };
+
+    func getGraphCopy(a : actor {}) : GraphCopy {
+        let graphCopy = actor (debug_show (Prim.principalOfActor(a))) : GraphCopy;
+        graphCopy;
+    };
+
+    public func initiate_graph_copy() : async () {
+        switch testCanister {
+            case null Prim.trap("null canister");
+            case (?canister) {
+                let graphCopy = getGraphCopy(canister);
+                await graphCopy.__motoko_stabilize_before_upgrade();
+                Prim.debugPrint("Graph copy");
+            };
+        };
+    };
+};
diff --git a/test/run-drun/migration-paths/old-installer.mo b/test/run-drun/migration-paths/old-installer.mo
new file mode 100644
index 00000000000..461e2f459e7
--- /dev/null
+++ b/test/run-drun/migration-paths/old-installer.mo
@@ -0,0 +1,49 @@
+// Uses classical persistence, see `migrations-path.drun`.
+import Prim "mo:⛔";
+import Cycles = "../cycles/cycles";
+import TestCanister "test_canister";
+
+actor installer {
+    stable var testCanister: ?TestCanister.TestCanister = null;
+
+    public func install() : async () {
+        if (Cycles.balance() == 0) {
+            await Cycles.provisional_top_up_actor(installer, 100_000_000_000_000);
+        };
+
+        Cycles.add(2_000_000_000_000);
+        testCanister := ?(await TestCanister.TestCanister());
+
+        Prim.debugPrint("Installed");
+    };
+
+    public func upgrade() : async () {
+         switch testCanister {
+            case null Prim.trap("null canister");
+            case (?canister) {
+                ignore await (system TestCanister.TestCanister)(#upgrade canister)();
+                Prim.debugPrint("Upgraded (default)");
+            }
+         }
+    };
+
+    public func upgrade_keep_main_memory() : async () {
+         switch testCanister {
+            case null Prim.trap("null canister");
+            case (?canister) {
+                ignore await (system TestCanister.TestCanister)(#upgrade_with_persistence { wasm_memory_persistence =  #Keep; canister })();
+                Prim.debugPrint("Upgraded (keep main memory)");
+            }
+         }
+    };
+
+    public func upgrade_replace_main_memory() : async () {
+         switch testCanister {
+            case null Prim.trap("null canister");
+            case (?canister) {
+                ignore await (system TestCanister.TestCanister)(#upgrade_with_persistence { wasm_memory_persistence = #Replace; canister })();
+                Prim.debugPrint("Upgraded (replace main memory)");
+            }
+         }
+    };
+};
diff --git a/test/run-drun/migration-paths/test_canister.mo b/test/run-drun/migration-paths/test_canister.mo
new file mode 100644
index 00000000000..25f99873d8d
--- /dev/null
+++ b/test/run-drun/migration-paths/test_canister.mo
@@ -0,0 +1,18 @@
+import Prim "mo:prim";
+
+actor class TestCanister() {
+   let length = 8 * 1024 * 1024;
+   func initialize() : [var Nat] {
+      let array = Prim.Array_init(length, 0xfff_ffff);
+      Prim.debugPrint("array initialized");
+      array;
+   };
+
+   stable var array = initialize();
+
+   stable var version = 0;
+   version += 1;
+
+   assert (array.size() == length);
+   Prim.debugPrint("version: " # debug_show (version));
+};
diff --git a/test/run-drun/more-function-stabilization.drun b/test/run-drun/more-function-stabilization.drun
new file mode 100644
index 00000000000..77489af1e0a
--- /dev/null
+++ b/test/run-drun/more-function-stabilization.drun
@@ -0,0 +1,8 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+install $ID more-function-stabilization/version0.mo ""
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x01\x7d\x0F"
+upgrade $ID more-function-stabilization/version0.mo ""
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x01\x7d\x0F"
+upgrade $ID more-function-stabilization/version1.mo ""
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x01\x7d\x0F"
+upgrade $ID more-function-stabilization/version2.mo ""
diff --git a/test/run-drun/more-function-stabilization/version0.mo b/test/run-drun/more-function-stabilization/version0.mo
new file mode 100644
index 00000000000..3111aae3f70
--- /dev/null
+++ b/test/run-drun/more-function-stabilization/version0.mo
@@ -0,0 +1,4 @@
+actor {
+   public shared query func test(x: ?Nat) : async () { loop {} };
+   stable let shared_function = test;
+};
diff --git a/test/run-drun/more-function-stabilization/version1.mo b/test/run-drun/more-function-stabilization/version1.mo
new file mode 100644
index 00000000000..49cef225a0e
--- /dev/null
+++ b/test/run-drun/more-function-stabilization/version1.mo
@@ -0,0 +1,4 @@
+actor {
+   public shared query func test() : async () { loop {} };
+   stable let shared_function = test;
+};
diff --git a/test/run-drun/more-function-stabilization/version2.mo b/test/run-drun/more-function-stabilization/version2.mo
new file mode 100644
index 00000000000..bc44cee409d
--- /dev/null
+++ b/test/run-drun/more-function-stabilization/version2.mo
@@ -0,0 +1,4 @@
+actor {
+   public shared query func test(x: ?Nat) : async ?Int { loop {} };
+   stable let shared_function = test;
+};
diff --git a/test/run-drun/more-function-upgrades.drun b/test/run-drun/more-function-upgrades.drun
index 4e4e1c30411..8015bf728f7 100644
--- a/test/run-drun/more-function-upgrades.drun
+++ b/test/run-drun/more-function-upgrades.drun
@@ -1,3 +1,4 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
 install $ID more-function-upgrades/version0.mo ""
 upgrade $ID more-function-upgrades/version0.mo ""
 upgrade $ID more-function-upgrades/version1.mo ""
diff --git a/test/run-drun/non-incremental-stabilization.mo b/test/run-drun/non-incremental-stabilization.mo
new file mode 100644
index 00000000000..89f2189f707
--- /dev/null
+++ b/test/run-drun/non-incremental-stabilization.mo
@@ -0,0 +1,39 @@
+//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+import Prim "mo:prim";
+
+actor {
+    stable var stableNat = 12345678901234567890123456789012345678901234567890123456789012345678901234567890;
+    stable var stableInt = -2345678901234567890123456789012345678901234567890123456789012345678901234567890;
+    stable var stableText = "Motoko graph-copy-based upgrade test";
+    
+    public func print() : async () {
+        Prim.debugPrint(debug_show (stableNat));
+        Prim.debugPrint(debug_show (stableInt));
+        Prim.debugPrint(debug_show (stableText));
+    };
+
+    system func preupgrade() {
+        Prim.debugPrint("PRE-UPGRADE HOOK!");
+    };
+
+    system func postupgrade() {
+        Prim.debugPrint("POST-UPGRADE HOOK!");
+    };
+};
+
+// Testing different invalid and correct combinations of explicit (de)stabilization calls
+
+//CALL ingress print "DIDL\x00\x00"
+//CALL ingress __motoko_destabilize_after_upgrade "DIDL\x00\x00"
+//CALL upgrade ""
+//CALL ingress __motoko_destabilize_after_upgrade "DIDL\x00\x00"
+//CALL ingress print "DIDL\x00\x00"
+//CALL ingress __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+//CALL ingress __motoko_destabilize_after_upgrade "DIDL\x00\x00"
+//CALL ingress print "DIDL\x00\x00"
+//CALL upgrade ""
+//CALL ingress print "DIDL\x00\x00"
+
+//SKIP run
+//SKIP run-ir
+//SKIP run-low
diff --git a/test/run-drun/ok/actor-arg.drun.ok b/test/run-drun/ok/actor-arg.drun.ok
index 8ecc4b9a562..2bc6bd6a9ab 100644
--- a/test/run-drun/ok/actor-arg.drun.ok
+++ b/test/run-drun/ok/actor-arg.drun.ok
@@ -1,4 +1,5 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
 ingress Completed: Reply: 0x4449444c0000
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: missing magic bytes
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: missing magic bytes.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/actor-class-mgmt-enhanced.drun-run.ok b/test/run-drun/ok/actor-class-mgmt-enhanced.drun-run.ok
new file mode 100644
index 00000000000..794156f8f4a
--- /dev/null
+++ b/test/run-drun/ok/actor-class-mgmt-enhanced.drun-run.ok
@@ -0,0 +1,4 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {controllers = [rwlgt-iiaaa-aaaaa-aaaaa-cai]; recent_changes = [{canister_version = 8; details = #code_deployment({mode = #upgrade}); origin = #from_canister({canister_id = rwlgt-iiaaa-aaaaa-aaaaa-cai; canister_version = ?27})}, {canister_version = 13; details = #code_deployment({mode = #upgrade}); origin = #from_canister({canister_id = rwlgt-iiaaa-aaaaa-aaaaa-cai; canister_version = ?32})}, {canister_version = 15; details = #code_deployment({mode = #reinstall}); origin = #from_canister({canister_id = rwlgt-iiaaa-aaaaa-aaaaa-cai; canister_version = ?36})}, {canister_version = 17; details = #code_deployment({mode = #upgrade}); origin = #from_canister({canister_id = rwlgt-iiaaa-aaaaa-aaaaa-cai; canister_version = ?40})}]; total_num_changes = 7}
+ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/actor-class-mgmt.drun-run.ok b/test/run-drun/ok/actor-class-mgmt.drun-run.ok
index bddc341e0a0..ff06ef84fef 100644
--- a/test/run-drun/ok/actor-class-mgmt.drun-run.ok
+++ b/test/run-drun/ok/actor-class-mgmt.drun-run.ok
@@ -1,4 +1,4 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
-debug.print: {controllers = [rwlgt-iiaaa-aaaaa-aaaaa-cai]; recent_changes = [{canister_version = 3; details = #code_deployment({mode = #upgrade}); origin = #from_canister({canister_id = rwlgt-iiaaa-aaaaa-aaaaa-cai; canister_version = ?22})}, {canister_version = 5; details = #code_deployment({mode = #upgrade}); origin = #from_canister({canister_id = rwlgt-iiaaa-aaaaa-aaaaa-cai; canister_version = ?26})}, {canister_version = 7; details = #code_deployment({mode = #reinstall}); origin = #from_canister({canister_id = rwlgt-iiaaa-aaaaa-aaaaa-cai; canister_version = ?30})}, {canister_version = 9; details = #code_deployment({mode = #upgrade}); origin = #from_canister({canister_id = rwlgt-iiaaa-aaaaa-aaaaa-cai; canister_version = ?34})}]; total_num_changes = 6}
+debug.print: {controllers = [rwlgt-iiaaa-aaaaa-aaaaa-cai]; recent_changes = [{canister_version = 0; details = #creation({controllers = [rwlgt-iiaaa-aaaaa-aaaaa-cai]}); origin = #from_canister({canister_id = rwlgt-iiaaa-aaaaa-aaaaa-cai; canister_version = null})}, {canister_version = 1; details = #code_deployment({mode = #install}); origin = #from_canister({canister_id = rwlgt-iiaaa-aaaaa-aaaaa-cai; canister_version = ?18})}, {canister_version = 3; details = #code_deployment({mode = #upgrade}); origin = #from_canister({canister_id = rwlgt-iiaaa-aaaaa-aaaaa-cai; canister_version = ?22})}, {canister_version = 5; details = #code_deployment({mode = #reinstall}); origin = #from_canister({canister_id = rwlgt-iiaaa-aaaaa-aaaaa-cai; canister_version = ?26})}]; total_num_changes = 4}
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/actor-reference-bad.drun-run.ok b/test/run-drun/ok/actor-reference-bad.drun-run.ok
index f08ed5ad9b6..c79f5ed4741 100644
--- a/test/run-drun/ok/actor-reference-bad.drun-run.ok
+++ b/test/run-drun/ok/actor-reference-bad.drun-run.ok
@@ -2,13 +2,18 @@ ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a000000000000000001
 ingress Completed: Reply: 0x4449444c0000
 debug.print: "bfozs-kwa73-7nadi":
 debug.print: "":
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: blob_of_principal: principal too short
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: blob_of_principal: principal too short.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: "BFOZS-KWA73-7NADI":
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: blob_of_principal: invalid principal
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: blob_of_principal: invalid principal.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: "bfozskwa737nadi":
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: blob_of_principal: invalid principal
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: blob_of_principal: invalid principal.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: "vpgq":
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: blob_of_principal: principal too short
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: blob_of_principal: principal too short.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: "5h74t-uga73-7nadi":
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: blob_of_principal: invalid principal
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: blob_of_principal: invalid principal.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/any-stabilization.drun.ok b/test/run-drun/ok/any-stabilization.drun.ok
new file mode 100644
index 00000000000..0cbc240bd2c
--- /dev/null
+++ b/test/run-drun/ok/any-stabilization.drun.ok
@@ -0,0 +1,11 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/any-upgrades.drun.ok b/test/run-drun/ok/any-upgrades.drun.ok
index 74612ec48fa..345ab75ad08 100644
--- a/test/run-drun/ok/any-upgrades.drun.ok
+++ b/test/run-drun/ok/any-upgrades.drun.ok
@@ -3,4 +3,5 @@ ingress Completed: Reply: 0x4449444c0000
 ingress Completed: Reply: 0x4449444c0000
 ingress Completed: Reply: 0x4449444c0000
 ingress Completed: Reply: 0x4449444c0000
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/array-out-of-bounds.drun-run.ok b/test/run-drun/ok/array-out-of-bounds.drun-run.ok
index 816618bf79c..5b9360e8005 100644
--- a/test/run-drun/ok/array-out-of-bounds.drun-run.ok
+++ b/test/run-drun/ok/array-out-of-bounds.drun-run.ok
@@ -1,4 +1,6 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: Array index out of bounds
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: Array index out of bounds
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: Array index out of bounds.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: Array index out of bounds.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/array-stabilization.drun.ok b/test/run-drun/ok/array-stabilization.drun.ok
new file mode 100644
index 00000000000..222390f1a45
--- /dev/null
+++ b/test/run-drun/ok/array-stabilization.drun.ok
@@ -0,0 +1,52 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+debug.print: [var 1.000000, 2.000000, 3.000000, 4.000000]
+debug.print: [[1, 2, 3], [4, 5, 6]]
+debug.print: ?[null, ?[], ?[?[null, null], null]]
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: [var 1.000000, 2.000000, 3.000000, 4.000000]
+debug.print: [[1, 2, 3], [4, 5, 6]]
+debug.print: ?[null, ?[], ?[?[null, null], null]]
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: [var 1.000000, 2.000000, 3.000000, 4.000000]
+debug.print: [[1, 2, 3], [4, 5, 6]]
+debug.print: ?[null, ?[], ?[?[null, null], null]]
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: [var 1.000000, 2.000000, 3.000000, 4.000000]
+debug.print: [[1, 2, 3], [4, 5, 6]]
+debug.print: ?[null, ?[], ?[?[null, null], null]]
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: [var 1.000000, 2.000000, 3.000000, 4.000000]
+debug.print: ?[null, ?[], ?[?[null, null], null]]
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: [var 1.000000, 2.000000, 3.000000, 4.000000]
+debug.print: ?[null, ?[], ?[?[null, null], null]]
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: [var 1.000000, 2.000000, 3.000000, 4.000000]
+debug.print: [[1, 2, 3], [4, 5, 6]]
+debug.print: ?[null, ?[], ?[?[null, null], null]]
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: [var 1.000000, 2.000000, 3.000000, 4.000000]
+debug.print: [[1, 2, 3], [4, 5, 6]]
+debug.print: ?[null, ?[], ?[?[null, null], null]]
+ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/array-upgrades.drun.ok b/test/run-drun/ok/array-upgrades.drun.ok
index 05e09719101..b190539f514 100644
--- a/test/run-drun/ok/array-upgrades.drun.ok
+++ b/test/run-drun/ok/array-upgrades.drun.ok
@@ -19,8 +19,10 @@ debug.print: [var 1.000000, 2.000000, 3.000000, 4.000000]
 debug.print: [[1, 2, 3], [4, 5, 6]]
 debug.print: ?[null, ?[], ?[?[null, null], null]]
 ingress Completed: Reply: 0x4449444c0000
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 ingress Completed: Reply: 0x4449444c0000
 debug.print: [var 1.000000, 2.000000, 3.000000, 4.000000]
 debug.print: ?[null, ?[], ?[?[null, null], null]]
diff --git a/test/run-drun/ok/assert.drun-run.ok b/test/run-drun/ok/assert.drun-run.ok
index 2cdc44a51bd..8d426cd2f92 100644
--- a/test/run-drun/ok/assert.drun-run.ok
+++ b/test/run-drun/ok/assert.drun-run.ok
@@ -1,3 +1,4 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: assertion failed at Lib.mo:3.27-5.4
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: assertion failed at Lib.mo:3.27-5.4.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/basic-cycles.drun-run.ok b/test/run-drun/ok/basic-cycles.drun-run.ok
index 113bff7f0bb..26919df4a8e 100644
--- a/test/run-drun/ok/basic-cycles.drun-run.ok
+++ b/test/run-drun/ok/basic-cycles.drun-run.ok
@@ -1,9 +1,12 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: cannot add more than 2^128 cycles
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: cannot add more than 2^128 cycles.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: ok
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: cannot add more than 2^128 cycles
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: cycles out of bounds
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: cannot add more than 2^128 cycles.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: cycles out of bounds.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {balance = 100_000_000_000_000}
 debug.print: {topped_up_balance = 100_000_000_000_000}
 debug.print: {added = 0}
diff --git a/test/run-drun/ok/blob-array-mismatch.drun.ok b/test/run-drun/ok/blob-array-mismatch.drun.ok
index f5f49f31f1f..171e156ee98 100644
--- a/test/run-drun/ok/blob-array-mismatch.drun.ok
+++ b/test/run-drun/ok/blob-array-mismatch.drun.ok
@@ -1,7 +1,9 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
 ingress Completed: Reply: 0x4449444c0000
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 ingress Completed: Reply: 0x4449444c0000
 ingress Completed: Reply: 0x4449444c0000
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/blob-stabilization.drun-run.ok b/test/run-drun/ok/blob-stabilization.drun-run.ok
new file mode 100644
index 00000000000..9eece096afc
--- /dev/null
+++ b/test/run-drun/ok/blob-stabilization.drun-run.ok
@@ -0,0 +1,6 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/blob-stabilization.tc.ok b/test/run-drun/ok/blob-stabilization.tc.ok
new file mode 100644
index 00000000000..46bad68cc47
--- /dev/null
+++ b/test/run-drun/ok/blob-stabilization.tc.ok
@@ -0,0 +1,2 @@
+blob-stabilization.mo:7.12-7.33: warning [M0199], this code is (or uses) the deprecated library `ExperimentalStableMemory`.
+Please use the `Region` library instead: https://internetcomputer.org/docs/current/motoko/main/stable-memory/stable-regions/#the-region-library or compile with flag `--experimental-stable-memory 1` to suppress this message.
diff --git a/test/run-drun/ok/blob-upgrade.drun-run.ok b/test/run-drun/ok/blob-upgrade.drun-run.ok
new file mode 100644
index 00000000000..a36906b5b9e
--- /dev/null
+++ b/test/run-drun/ok/blob-upgrade.drun-run.ok
@@ -0,0 +1,5 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/blob-upgrade.tc.ok b/test/run-drun/ok/blob-upgrade.tc.ok
new file mode 100644
index 00000000000..8569349f63e
--- /dev/null
+++ b/test/run-drun/ok/blob-upgrade.tc.ok
@@ -0,0 +1,2 @@
+blob-upgrade.mo:6.12-6.33: warning [M0199], this code is (or uses) the deprecated library `ExperimentalStableMemory`.
+Please use the `Region` library instead: https://internetcomputer.org/docs/current/motoko/main/stable-memory/stable-regions/#the-region-library or compile with flag `--experimental-stable-memory 1` to suppress this message.
diff --git a/test/run-drun/ok/call-async-method.drun-run.ok b/test/run-drun/ok/call-async-method.drun-run.ok
index 701434a8d92..a25b6a42feb 100644
--- a/test/run-drun/ok/call-async-method.drun-run.ok
+++ b/test/run-drun/ok/call-async-method.drun-run.ok
@@ -1,3 +1,4 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: not a self-call
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: not a self-call.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/call-raw-candid.drun-run.ok b/test/run-drun/ok/call-raw-candid.drun-run.ok
index f01abb9e600..cebd7fe8635 100644
--- a/test/run-drun/ok/call-raw-candid.drun-run.ok
+++ b/test/run-drun/ok/call-raw-candid.drun-run.ok
@@ -4,7 +4,9 @@ debug.print: unit!
 debug.print: ("int", +1)
 debug.print: ("text", "hello")
 debug.print: ("text", (1, true, 'a'))
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: unexpected IDL type when parsing Nat
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: ohoh
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: unexpected IDL type when parsing Nat.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: ohoh.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: supercalifragilisticexpialidocious
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/call-raw.drun-run.ok b/test/run-drun/ok/call-raw.drun-run.ok
index 13c391738f9..94f1004cf6b 100644
--- a/test/run-drun/ok/call-raw.drun-run.ok
+++ b/test/run-drun/ok/call-raw.drun-run.ok
@@ -9,6 +9,7 @@ debug.print: ("int", +1)
 debug.print: ("int", +2)
 debug.print: ("text", "hello")
 debug.print: ("text", (1, true, 'a'))
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: ohoh
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: ohoh.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: supercalifragilisticexpialidocious
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/callback-trap-upgrade.drun-run.ok b/test/run-drun/ok/callback-trap-upgrade.drun-run.ok
index 1f69dcb02b3..c1d415ad2d7 100644
--- a/test/run-drun/ok/callback-trap-upgrade.drun-run.ok
+++ b/test/run-drun/ok/callback-trap-upgrade.drun-run.ok
@@ -6,7 +6,8 @@ ingress Completed: Reply: 0x4449444c0000
 debug.print: In go(), outstanding callbacks: 0
 debug.print: In ping(), outstanding callbacks: 1
 debug.print: In go() again, outstanding callbacks: 0
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: assertion failed at callback-trap-upgrade.mo:13.5-13.18
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: assertion failed at callback-trap-upgrade.mo:13.5-13.18.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: In stats(), outstanding callbacks: 0
 ingress Completed: Reply: 0x4449444c0000
 debug.print: init'ed
diff --git a/test/run-drun/ok/class-import.tc.ok b/test/run-drun/ok/class-import.tc.ok
deleted file mode 100644
index 543b6ceb683..00000000000
--- a/test/run-drun/ok/class-import.tc.ok
+++ /dev/null
@@ -1,2 +0,0 @@
-class-import.mo:32.10-32.14: warning [M0194], unused identifier trap (delete or rename to wildcard `_` or `_trap`)
-class-import.mo:41.10-41.14: warning [M0194], unused identifier trap (delete or rename to wildcard `_` or `_trap`)
diff --git a/test/run-drun/ok/destabilization-crash.drun.ok b/test/run-drun/ok/destabilization-crash.drun.ok
new file mode 100644
index 00000000000..ea58742ce4f
--- /dev/null
+++ b/test/run-drun/ok/destabilization-crash.drun.ok
@@ -0,0 +1,13 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+debug.print: preupgrade!
+debug.print: {heap_MB = 64}
+debug.print: {heap_MB = 128}
+debug.print: {heap_MB = 192}
+debug.print: {heap_MB = 256}
+debug.print: {heap_MB = 320}
+debug.print: {heap_MB = 384}
+debug.print: {heap_MB = 448}
+debug.print: postupgrade!
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: assertion failed at destabilization-crash.mo:41.6-41.18.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/divide-by-zero.drun-run.ok b/test/run-drun/ok/divide-by-zero.drun-run.ok
index 12b27047055..161fd48b034 100644
--- a/test/run-drun/ok/divide-by-zero.drun-run.ok
+++ b/test/run-drun/ok/divide-by-zero.drun-run.ok
@@ -1,2 +1,3 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
-ingress Err: IC0502: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped: integer division by 0
+ingress Err: IC0502: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister trapped: integer division by 0.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped
diff --git a/test/run-drun/ok/double-sharing-benchmark.drun-run.ok b/test/run-drun/ok/double-sharing-benchmark.drun-run.ok
new file mode 100644
index 00000000000..0b97c74f0b1
--- /dev/null
+++ b/test/run-drun/ok/double-sharing-benchmark.drun-run.ok
@@ -0,0 +1,6 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Upgraded!
+ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/empty-actor.drun-run.ok b/test/run-drun/ok/empty-actor-classical.drun-run.ok
similarity index 100%
rename from test/run-drun/ok/empty-actor.drun-run.ok
rename to test/run-drun/ok/empty-actor-classical.drun-run.ok
diff --git a/test/run-drun/ok/empty-actor-enhanced.drun-run.ok b/test/run-drun/ok/empty-actor-enhanced.drun-run.ok
new file mode 100644
index 00000000000..a6f776f43c6
--- /dev/null
+++ b/test/run-drun/ok/empty-actor-enhanced.drun-run.ok
@@ -0,0 +1,2 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/error-codes.drun-run.ok b/test/run-drun/ok/error-codes.drun-run.ok
index fb116c037b8..ec52f8f344a 100644
--- a/test/run-drun/ok/error-codes.drun-run.ok
+++ b/test/run-drun/ok/error-codes.drun-run.ok
@@ -1,5 +1,6 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
 debug.print: canister_reject:ball
-debug.print: canister_error:IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: assertion failed at error-codes.mo:10.33-10.50
+debug.print: canister_error:IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: assertion failed at error-codes.mo:10.33-10.50.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/explicit-trap.drun-run.ok b/test/run-drun/ok/explicit-trap.drun-run.ok
index 587ab4d85f6..822bcbea212 100644
--- a/test/run-drun/ok/explicit-trap.drun-run.ok
+++ b/test/run-drun/ok/explicit-trap.drun-run.ok
@@ -1,3 +1,4 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: This is an explicit trap
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: This is an explicit trap.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/function-stabilization.drun.ok b/test/run-drun/ok/function-stabilization.drun.ok
new file mode 100644
index 00000000000..821b4dd65a9
--- /dev/null
+++ b/test/run-drun/ok/function-stabilization.drun.ok
@@ -0,0 +1,19 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/function-upgrades.drun.ok b/test/run-drun/ok/function-upgrades.drun.ok
index 60b4b27b770..b6f010cec85 100644
--- a/test/run-drun/ok/function-upgrades.drun.ok
+++ b/test/run-drun/ok/function-upgrades.drun.ok
@@ -3,7 +3,10 @@ ingress Completed: Reply: 0x4449444c0000
 ingress Completed: Reply: 0x4449444c0000
 ingress Completed: Reply: 0x4449444c0000
 ingress Completed: Reply: 0x4449444c0000
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/gc-trigger-acl.drun-run.ok b/test/run-drun/ok/gc-trigger-acl.drun-run.ok
index 52609530b06..c9b6f351cfc 100644
--- a/test/run-drun/ok/gc-trigger-acl.drun-run.ok
+++ b/test/run-drun/ok/gc-trigger-acl.drun-run.ok
@@ -2,5 +2,6 @@ ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a000000000000000001
 ingress Completed: Reply: 0x4449444c0000
 debug.print: Self.gc_trigger()
 debug.print: controlee.gc_trigger()
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: not a self-call or call from controller
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: not a self-call or call from controller.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/hashBlob.drun-run.ok b/test/run-drun/ok/hashBlob.drun-run.ok
new file mode 100644
index 00000000000..a0e0ba258ac
--- /dev/null
+++ b/test/run-drun/ok/hashBlob.drun-run.ok
@@ -0,0 +1,5 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 3_580_832_660
+debug.print: 3_580_832_661
+ingress Completed: Reply: 0x4449444c000179942b6fd5
diff --git a/test/run-drun/ok/hashBlob.run-ir.ok b/test/run-drun/ok/hashBlob.run-ir.ok
new file mode 100644
index 00000000000..9a4a410d26e
--- /dev/null
+++ b/test/run-drun/ok/hashBlob.run-ir.ok
@@ -0,0 +1,2 @@
+3_015_377_762
+3_015_377_763
diff --git a/test/run-drun/ok/hashBlob.run-low.ok b/test/run-drun/ok/hashBlob.run-low.ok
new file mode 100644
index 00000000000..9a4a410d26e
--- /dev/null
+++ b/test/run-drun/ok/hashBlob.run-low.ok
@@ -0,0 +1,2 @@
+3_015_377_762
+3_015_377_763
diff --git a/test/run-drun/ok/hashBlob.run.ok b/test/run-drun/ok/hashBlob.run.ok
new file mode 100644
index 00000000000..9a4a410d26e
--- /dev/null
+++ b/test/run-drun/ok/hashBlob.run.ok
@@ -0,0 +1,2 @@
+3_015_377_762
+3_015_377_763
diff --git a/test/run-drun/ok/idl-any-stable.drun-run.ok b/test/run-drun/ok/idl-any-stable.drun-run.ok
index 5b97455c400..711e36d55bf 100644
--- a/test/run-drun/ok/idl-any-stable.drun-run.ok
+++ b/test/run-drun/ok/idl-any-stable.drun-run.ok
@@ -1,3 +1,4 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
-Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: illegal type table
+Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: illegal type table.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/idl-bad.drun-run.ok b/test/run-drun/ok/idl-bad.drun-run.ok
index f00ef5f4d99..46940dc2aae 100644
--- a/test/run-drun/ok/idl-bad.drun-run.ok
+++ b/test/run-drun/ok/idl-bad.drun-run.ok
@@ -1,3 +1,4 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
-Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: missing magic bytes
+Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: missing magic bytes.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/idl-bool.drun-run.ok b/test/run-drun/ok/idl-bool.drun-run.ok
index 51793f447eb..ad760913944 100644
--- a/test/run-drun/ok/idl-bool.drun-run.ok
+++ b/test/run-drun/ok/idl-bool.drun-run.ok
@@ -2,4 +2,5 @@ ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a000000000000000001
 ingress Completed: Reply: 0x4449444c0000
 Ok: Reply: 0x4449444c00017e01
 Ok: Reply: 0x4449444c00017e00
-Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: byte tag not 0 or 1
+Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: byte tag not 0 or 1.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/idl-candid-config.drun-run.ok b/test/run-drun/ok/idl-candid-config.drun-run.ok
index 748fc2deaad..d0243b82771 100644
--- a/test/run-drun/ok/idl-candid-config.drun-run.ok
+++ b/test/run-drun/ok/idl-candid-config.drun-run.ok
@@ -1,2 +1,3 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: Candid limit denominator cannot be zero
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: Candid limit denominator cannot be zero.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/idl-large-principal.drun-run.ok b/test/run-drun/ok/idl-large-principal.drun-run.ok
index 6bb159959f3..60430f3be92 100644
--- a/test/run-drun/ok/idl-large-principal.drun-run.ok
+++ b/test/run-drun/ok/idl-large-principal.drun-run.ok
@@ -1,8 +1,11 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
 Ok: Reply: 0x4449444c0000
-Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: principal too long
+Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: principal too long.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 Ok: Reply: 0x4449444c0000
-Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: principal too long
+Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: principal too long.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 Ok: Reply: 0x4449444c0000
-Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: principal too long
+Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: principal too long.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/idl-nary.drun-run.ok b/test/run-drun/ok/idl-nary.drun-run.ok
index 302f030d3f9..4218d474335 100644
--- a/test/run-drun/ok/idl-nary.drun-run.ok
+++ b/test/run-drun/ok/idl-nary.drun-run.ok
@@ -8,6 +8,8 @@ Ok: Reply: 0x4449444c016c0400710171027103710100034f6e650354776f05546872656504466
 Ok: Reply: 0x4449444c016c0400710171027103710100034f6e650354776f05546872656504466f7572
 Ok: Reply: 0x4449444c016c0400710171027103710100034f6e650354776f05546872656504466f7572
 Ok: Reply: 0x4449444c016c0400710171027103710100034f6e650354776f05546872656504466f7572
-Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: too few arguments ttt
-Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: left-over bytes ttt
+Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: too few arguments ttt.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: left-over bytes ttt.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 Ok: Reply: 0x4449444c000471717171034f6e650354776f05546872656504466f7572
diff --git a/test/run-drun/ok/idl-record.drun-run.ok b/test/run-drun/ok/idl-record.drun-run.ok
index f5c618e1f21..3da3c149be1 100644
--- a/test/run-drun/ok/idl-record.drun-run.ok
+++ b/test/run-drun/ok/idl-record.drun-run.ok
@@ -8,4 +8,5 @@ debug.print: ok: +25
 Ok: Reply: 0x4449444c0000
 debug.print: ok:  Hey! +25
 Ok: Reply: 0x4449444c00017719
-Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: did not find field extra in record
+Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: did not find field extra in record.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/idl-spacebomb.drun-run.ok b/test/run-drun/ok/idl-spacebomb.drun-run.ok
index ba492848d38..c7ab165837e 100644
--- a/test/run-drun/ok/idl-spacebomb.drun-run.ok
+++ b/test/run-drun/ok/idl-spacebomb.drun-run.ok
@@ -1,73 +1,108 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
 debug.print: {function = "vec_null_extra_argument"; hex = "4449444C016D7F01008094EBDC03"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "vec_reserved_extra_argument"; hex = "4449444C016D7001008094EBDC03"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "zero_sized_record_extra_argument"; hex = "4449444C046C03007F010102026C0100706C006D0001038094EBDC03"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "vec_vec_null_extra_argument"; hex = "4449444C026D016D7F010005FFFF3FFFFF3FFFFF3FFFFF3FFFFF3F"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "vec_record_emp_extra_argument"; hex = "4449444C026D016C00010080ADE204"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "vec_opt_record_with_2_20_null_extra_argument"; hex = "4449444C176C02017F027F6C02010002006C02000101016C02000201026C02000301036C02000401046C02000501056C02000601066C02000701076C02000801086C02000901096C02000A010A6C02000B010B6C02000C010C6C02000D020D6C02000E010E6C02000F010F6C02001001106C02001101116C02001201126C02001301136E146D150116050101010101"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "vec_null_not_ignored"; hex = "4449444C016D7F01008094EBDC03"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "vec_null_not_ignored"; hex = "4449444C016D7F010080ADE204"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "vec_null_not_ignored"; hex = "4449444C016D7F0100FFFF3F"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "vec_null_not_ignored"; hex = "4449444C016D7F010080BF18"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "vec_reserved_not_ignored"; hex = "4449444C016D7001008094EBDC03"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "vec_reserved_not_ignored"; hex = "4449444C016D70010080ADE204"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "vec_reserved_not_ignored"; hex = "4449444C016D700100FFFF3F"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "vec_reserved_not_ignored"; hex = "4449444C016D70010080BF18"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "zero_sized_record_not_ignored"; hex = "4449444C046C03007F010102026C0100706C006D0001038094EBDC03"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "zero_sized_record_not_ignored"; hex = "4449444C046C03007F010102026C0100706C006D00010380ADE204"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "zero_sized_record_not_ignored"; hex = "4449444C046C03007F010102026C0100706C006D000103FFFF3F"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "zero_sized_record_not_ignored"; hex = "4449444C046C03007F010102026C0100706C006D00010380B518"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "vec_vec_null_not_ignored"; hex = "4449444C026D016D7F010005FFFF3FFFFF3FFFFF3FFFFF3FFFFF3F"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "vec_record_emp_not_ignored"; hex = "4449444C026D016C00010080ADE204"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "vec_null_subtyping"; hex = "4449444C016D7F01008094EBDC03"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "vec_null_subtyping"; hex = "4449444C016D7F010080ADE204"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "vec_null_subtyping"; hex = "4449444C016D7F0100FFFF3F"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "vec_null_subtyping"; hex = "4449444C016D7F010080BF18"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "vec_reserved_subtyping"; hex = "4449444C016D7001008094EBDC03"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "vec_reserved_subtyping"; hex = "4449444C016D70010080ADE204"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "vec_reserved_subtyping"; hex = "4449444C016D700100FFFF3F"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "vec_reserved_subtyping"; hex = "4449444C016D70010080BF18"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "zero_sized_record_subtyping"; hex = "4449444C046C03007F010102026C0100706C006D0001038094EBDC03"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "zero_sized_record_subtyping"; hex = "4449444C046C03007F010102026C0100706C006D00010380ADE204"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "zero_sized_record_subtyping"; hex = "4449444C046C03007F010102026C0100706C006D000103FFFF3F"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "zero_sized_record_subtyping"; hex = "4449444C046C03007F010102026C0100706C006D00010380B518"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "vec_vec_null_subtyping"; hex = "4449444C026D016D7F010005FFFF3FFFFF3FFFFF3FFFFF3FFFFF3F"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "vec_record_emp_subtyping"; hex = "4449444C026D016C00010080ADE204"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {function = "vec_opt_record_with_2_20_null_subtyping"; hex = "4449444C176C02017F027F6C02010002006C02000101016C02000201026C02000301036C02000401046C02000501056C02000601066C02000701076C02000801086C02000901096C02000A010A6C02000B010B6C02000C010C6C02000D020D6C02000E010E6C02000F010F6C02001001106C02001101116C02001201126C02001301136E146D150116050101010101"}
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/idl-sub-rec.drun-run.ok b/test/run-drun/ok/idl-sub-rec.drun-run.ok
index 88b93db6860..99e1c0ce8d0 100644
--- a/test/run-drun/ok/idl-sub-rec.drun-run.ok
+++ b/test/run-drun/ok/idl-sub-rec.drun-run.ok
@@ -10,13 +10,15 @@ debug.print: ok f0
 debug.print: ok_3
 debug.print: ok f0
 debug.print: ok_4
-debug.print: ok 5:IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: incompatible function type
+debug.print: ok 5:IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: incompatible function type.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: ok f1
 debug.print: ok_6
 debug.print: ok f1
 debug.print: ok f1
 debug.print: ok_8
-debug.print: ok_9IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: incompatible function type
+debug.print: ok_9IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: incompatible function type.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: ok f4
 debug.print: ok_10
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/idl-sub-variant.drun-run.ok b/test/run-drun/ok/idl-sub-variant.drun-run.ok
index 0eac0de659e..3a989f62591 100644
--- a/test/run-drun/ok/idl-sub-variant.drun-run.ok
+++ b/test/run-drun/ok/idl-sub-variant.drun-run.ok
@@ -2,5 +2,6 @@ ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a000000000000000001
 ingress Completed: Reply: 0x4449444c0000
 debug.print: pass1
 debug.print: pass1
-debug.print: pass3: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: unexpected variant tag
+debug.print: pass3: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: unexpected variant tag.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/idl-tuple.drun-run.ok b/test/run-drun/ok/idl-tuple.drun-run.ok
index 2ea2f0c2dbc..2d213abf3b7 100644
--- a/test/run-drun/ok/idl-tuple.drun-run.ok
+++ b/test/run-drun/ok/idl-tuple.drun-run.ok
@@ -5,5 +5,6 @@ Ok: Reply: 0x4449444c00037c7c7c050581848c20
 Ok: Reply: 0x4449444c00037c7c7c050581848c20
 Ok: Reply: 0x4449444c00037c7c7c050581848c20
 Ok: Reply: 0x4449444c00037c7c7c050581848c20
-Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: did not find tuple field in record
+Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: did not find tuple field in record.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 Ok: Reply: 0x4449444c00037c7c7c050581848c20
diff --git a/test/run-drun/ok/incremental-actor-class-stabilization.drun-run.ok b/test/run-drun/ok/incremental-actor-class-stabilization.drun-run.ok
new file mode 100644
index 00000000000..12c4878bd73
--- /dev/null
+++ b/test/run-drun/ok/incremental-actor-class-stabilization.drun-run.ok
@@ -0,0 +1,24 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 1_234_567_890_123_456_789_012_345_678_901_234_567_890
+debug.print: "Test actor"
+debug.print: 100_000
+debug.print: 1_234_567_890_123_456_789_012_345_678_901_234_567_890
+debug.print: -1_234_567_890_123_456_789_012_345_678_901_234_567_890
+debug.print: "Test actor"
+debug.print: 100_000
+debug.print: PRE-UPGRADE HOOK!
+debug.print: IC0503: Error from Canister rrkah-fqaaa-aaaaa-aaaaq-cai: Canister called `ic0.trap` with message: Messages are blocked during stabilization.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+debug.print: Upgrade
+debug.print: IC0503: Error from Canister rrkah-fqaaa-aaaaa-aaaaq-cai: Canister called `ic0.trap` with message: Messages are blocked during stabilization.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+debug.print: POST-UPGRADE HOOK!
+debug.print: 0
+debug.print: ""
+debug.print: 0
+debug.print: 1_234_567_890_123_456_789_012_345_678_901_234_567_890
+debug.print: -1_234_567_890_123_456_789_012_345_678_901_234_567_890
+debug.print: "Test actor"
+debug.print: 100_000
+ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/incremental-stabilization.drun-run.ok b/test/run-drun/ok/incremental-stabilization.drun-run.ok
new file mode 100644
index 00000000000..3179b2b3666
--- /dev/null
+++ b/test/run-drun/ok/incremental-stabilization.drun-run.ok
@@ -0,0 +1,36 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+debug.print: Initialize flexible Nat
+debug.print: Initialize stable Nat
+debug.print: Initialize stable Int
+debug.print: Initialize stable text
+debug.print: Initialize stable array
+debug.print: Initialize stable object
+debug.print: Initialize flexible text
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 1
+debug.print: "Flexible text"
+debug.print: 12_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890
+debug.print: -2_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890
+debug.print: "Motoko incremental graph-copy-based upgrade test"
+debug.print: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99]
+debug.print: {stableArray = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99]; stableInt = -2_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890; stableNat = 12_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890; stableText = "Motoko incremental graph-copy-based upgrade test"}
+ingress Completed: Reply: 0x4449444c0000
+debug.print: PRE-UPGRADE HOOK!
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: Messages are blocked during stabilization.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: Messages are blocked during stabilization.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+debug.print: Initialize flexible Nat
+debug.print: Initialize flexible text
+debug.print: POST-UPGRADE HOOK!
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 1
+debug.print: "Flexible text"
+debug.print: 12_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890
+debug.print: -2_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890
+debug.print: "Motoko incremental graph-copy-based upgrade test"
+debug.print: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99]
+debug.print: {stableArray = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99]; stableInt = -2_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890; stableNat = 12_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890; stableText = "Motoko incremental graph-copy-based upgrade test"}
+ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/issue-1847.drun-run.ok b/test/run-drun/ok/issue-1847.drun-run.ok
index e72ff1ec5dd..94acfc6ba06 100644
--- a/test/run-drun/ok/issue-1847.drun-run.ok
+++ b/test/run-drun/ok/issue-1847.drun-run.ok
@@ -1,2 +1,3 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: empty input. Expected Candid-encoded argument, but received a zero-length argument
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: empty input. Expected Candid-encoded argument, but received a zero-length argument.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/issue-4229.drun-run.ok b/test/run-drun/ok/issue-4229.drun-run.ok
index f179b049f47..c8eea548cae 100644
--- a/test/run-drun/ok/issue-4229.drun-run.ok
+++ b/test/run-drun/ok/issue-4229.drun-run.ok
@@ -1,3 +1,4 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 debug.print: "\44\49\44\4C\00\01\7F"
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: invalid type argument
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: invalid type argument.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/let-else-actor.drun-run.ok b/test/run-drun/ok/let-else-actor.drun-run.ok
index 6cc3b10e250..05b47775126 100644
--- a/test/run-drun/ok/let-else-actor.drun-run.ok
+++ b/test/run-drun/ok/let-else-actor.drun-run.ok
@@ -1,2 +1,3 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: x was null
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: x was null.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/live-upgrade.drun.ok b/test/run-drun/ok/live-upgrade.drun.ok
index 8480025fc17..42c2bcf5848 100644
--- a/test/run-drun/ok/live-upgrade.drun.ok
+++ b/test/run-drun/ok/live-upgrade.drun.ok
@@ -14,4 +14,5 @@ ingress Completed: Reply: 0x4449444c0000
 debug.print: {version = 2}
 ingress Completed: Reply: 0x4449444c0000
 ingress Completed: Reply: 0x4449444c0000
-Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: assertion failed at live-upgrade.mo:11.49-13.4
+Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: assertion failed at live-upgrade.mo:11.49-13.4.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/map-mixed-upgrades.drun.ok b/test/run-drun/ok/map-mixed-upgrades.drun.ok
new file mode 100644
index 00000000000..56fd3f61a0b
--- /dev/null
+++ b/test/run-drun/ok/map-mixed-upgrades.drun.ok
@@ -0,0 +1,181 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {node = 1; state = null; upgrades = 0}
+debug.print: 1: insert (1, "1")
+debug.print: 1: lookup 1
+debug.print: {node = 2; state = null; upgrades = 0}
+debug.print: 2: insert (2, "2")
+debug.print: 2: lookup 2
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {node = 1; state = ?(1, "1", null); upgrades = 1}
+debug.print: {node = 2; state = ?(2, "2", null); upgrades = 1}
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {node = 1; state = ?(1, "1", null); upgrades = 2}
+debug.print: {node = 2; state = ?(2, "2", null); upgrades = 2}
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reject: Invalid upgrade option: The `wasm_memory_persistence: opt Keep` upgrade option requires that the new canister module supports enhanced orthogonal persistence.
+debug.print: {node = 3; state = null; upgrades = 0}
+debug.print: 3: insert (3, "3")
+debug.print: 3: lookup 3
+debug.print: {node = 4; state = null; upgrades = 0}
+debug.print: 4: insert (4, "4")
+debug.print: 4: lookup 4
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {node = 1; state = ?(1, "1", null); upgrades = 3}
+debug.print: {node = 2; state = ?(2, "2", null); upgrades = 3}
+debug.print: {node = 3; state = ?(3, "3", null); upgrades = 1}
+debug.print: {node = 4; state = ?(4, "4", null); upgrades = 1}
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {node = 5; state = null; upgrades = 0}
+debug.print: 5: insert (5, "5")
+debug.print: 5: lookup 5
+debug.print: 5: remove 5
+debug.print: 5: lookup 5
+debug.print: 5: insert (5, "5")
+debug.print: 5: lookup 5
+debug.print: {node = 6; state = null; upgrades = 0}
+debug.print: 6: insert (6, "6")
+debug.print: 6: lookup 6
+debug.print: 6: remove 6
+debug.print: 6: lookup 6
+debug.print: 6: insert (6, "6")
+debug.print: 6: lookup 6
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {node = 1; state = ?(1, "1", null); upgrades = 4}
+debug.print: {node = 2; state = ?(2, "2", null); upgrades = 4}
+debug.print: {node = 3; state = ?(3, "3", null); upgrades = 2}
+debug.print: {node = 4; state = ?(4, "4", null); upgrades = 2}
+debug.print: {node = 5; state = ?(5, "5", null); upgrades = 1}
+debug.print: {node = 6; state = ?(6, "6", null); upgrades = 1}
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {node = 7; state = null; upgrades = 0}
+debug.print: 7: insert (7, "7")
+debug.print: 7: lookup 7
+debug.print: 7: remove 7
+debug.print: 7: lookup 7
+debug.print: 7: insert (7, "7")
+debug.print: 7: lookup 7
+debug.print: {node = 0; state = null; upgrades = 0}
+debug.print: 0: insert (8, "8")
+debug.print: 0: lookup 8
+debug.print: 0: remove 8
+debug.print: 0: lookup 8
+debug.print: 0: insert (8, "8")
+debug.print: 0: lookup 8
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {node = 1; state = ?(1, "1", null); upgrades = 5}
+debug.print: {node = 2; state = ?(2, "2", null); upgrades = 5}
+debug.print: {node = 3; state = ?(3, "3", null); upgrades = 3}
+debug.print: {node = 4; state = ?(4, "4", null); upgrades = 3}
+debug.print: {node = 5; state = ?(5, "5", null); upgrades = 2}
+debug.print: {node = 6; state = ?(6, "6", null); upgrades = 2}
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 1: lookup 9
+debug.print: 1: insert (9, "9")
+debug.print: 1: lookup 9
+debug.print: 1: remove 9
+debug.print: 1: lookup 9
+debug.print: 1: insert (9, "9")
+debug.print: 1: lookup 9
+debug.print: 2: lookup 10
+debug.print: 2: insert (10, "10")
+debug.print: 2: lookup 10
+debug.print: 2: remove 10
+debug.print: 2: lookup 10
+debug.print: 2: insert (10, "10")
+debug.print: 2: lookup 10
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {node = 1; state = ?(9, "9", ?(1, "1", null)); upgrades = 6}
+debug.print: {node = 2; state = ?(10, "10", ?(2, "2", null)); upgrades = 6}
+debug.print: {node = 3; state = ?(3, "3", null); upgrades = 4}
+debug.print: {node = 4; state = ?(4, "4", null); upgrades = 4}
+debug.print: {node = 5; state = ?(5, "5", null); upgrades = 3}
+debug.print: {node = 6; state = ?(6, "6", null); upgrades = 3}
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 3: lookup 11
+debug.print: 3: insert (11, "11")
+debug.print: 3: lookup 11
+debug.print: 3: remove 11
+debug.print: 3: lookup 11
+debug.print: 3: insert (11, "11")
+debug.print: 3: lookup 11
+debug.print: 4: lookup 12
+debug.print: 4: insert (12, "12")
+debug.print: 4: lookup 12
+debug.print: 4: remove 12
+debug.print: 4: lookup 12
+debug.print: 4: insert (12, "12")
+debug.print: 4: lookup 12
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {node = 1; state = ?(9, "9", ?(1, "1", null)); upgrades = 7}
+debug.print: {node = 2; state = ?(10, "10", ?(2, "2", null)); upgrades = 7}
+debug.print: {node = 3; state = ?(11, "11", ?(3, "3", null)); upgrades = 5}
+debug.print: {node = 4; state = ?(12, "12", ?(4, "4", null)); upgrades = 5}
+debug.print: {node = 5; state = ?(5, "5", null); upgrades = 4}
+debug.print: {node = 6; state = ?(6, "6", null); upgrades = 4}
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 5: lookup 13
+debug.print: 5: insert (13, "13")
+debug.print: 5: lookup 13
+debug.print: 5: remove 13
+debug.print: 5: lookup 13
+debug.print: 5: insert (13, "13")
+debug.print: 5: lookup 13
+debug.print: 6: lookup 14
+debug.print: 6: insert (14, "14")
+debug.print: 6: lookup 14
+debug.print: 6: remove 14
+debug.print: 6: lookup 14
+debug.print: 6: insert (14, "14")
+debug.print: 6: lookup 14
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0504: Missing upgrade option: Enhanced orthogonal persistence requires the `wasm_memory_persistence` upgrade option.
+debug.print: {node = 1; state = ?(9, "9", ?(1, "1", null)); upgrades = 8}
+debug.print: {node = 2; state = ?(10, "10", ?(2, "2", null)); upgrades = 8}
+debug.print: {node = 3; state = ?(11, "11", ?(3, "3", null)); upgrades = 6}
+debug.print: {node = 4; state = ?(12, "12", ?(4, "4", null)); upgrades = 6}
+debug.print: {node = 5; state = ?(13, "13", ?(5, "5", null)); upgrades = 5}
+debug.print: {node = 6; state = ?(14, "14", ?(6, "6", null)); upgrades = 5}
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 7: lookup 15
+debug.print: 7: insert (15, "15")
+debug.print: 7: lookup 15
+debug.print: 7: remove 15
+debug.print: 7: lookup 15
+debug.print: 7: insert (15, "15")
+debug.print: 7: lookup 15
+debug.print: 0: lookup 16
+debug.print: 0: insert (16, "16")
+debug.print: 0: lookup 16
+debug.print: 0: remove 16
+debug.print: 0: lookup 16
+debug.print: 0: insert (16, "16")
+debug.print: 0: lookup 16
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {node = 1; state = null; upgrades = 0}
+debug.print: {node = 2; state = null; upgrades = 0}
+debug.print: {node = 3; state = null; upgrades = 0}
+debug.print: {node = 4; state = null; upgrades = 0}
+debug.print: {node = 5; state = null; upgrades = 0}
+debug.print: {node = 6; state = null; upgrades = 0}
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 1: lookup 17
+debug.print: 1: insert (17, "17")
+debug.print: 1: lookup 17
+debug.print: 1: remove 17
+debug.print: 1: lookup 17
+debug.print: 1: insert (17, "17")
+debug.print: 1: lookup 17
+debug.print: 2: lookup 18
+debug.print: 2: insert (18, "18")
+debug.print: 2: lookup 18
+debug.print: 2: remove 18
+debug.print: 2: lookup 18
+debug.print: 2: insert (18, "18")
+debug.print: 2: lookup 18
+ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/map-stabilization.drun.ok b/test/run-drun/ok/map-stabilization.drun.ok
new file mode 100644
index 00000000000..ae61f06bb3d
--- /dev/null
+++ b/test/run-drun/ok/map-stabilization.drun.ok
@@ -0,0 +1,67 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {node = 1; state = null; upgrades = 0}
+debug.print: 1: insert (1, "1")
+debug.print: 1: lookup 1
+debug.print: {node = 2; state = null; upgrades = 0}
+debug.print: 2: insert (2, "2")
+debug.print: 2: lookup 2
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {node = 1; state = ?(1, "1", null); upgrades = 1}
+debug.print: {node = 2; state = ?(2, "2", null); upgrades = 1}
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {node = 3; state = null; upgrades = 0}
+debug.print: 3: insert (3, "3")
+debug.print: 3: lookup 3
+debug.print: {node = 4; state = null; upgrades = 0}
+debug.print: 4: insert (4, "4")
+debug.print: 4: lookup 4
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {node = 1; state = ?(1, "1", null); upgrades = 2}
+debug.print: {node = 2; state = ?(2, "2", null); upgrades = 2}
+debug.print: {node = 3; state = ?(3, "3", null); upgrades = 1}
+debug.print: {node = 4; state = ?(4, "4", null); upgrades = 1}
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {node = 5; state = null; upgrades = 0}
+debug.print: 5: insert (5, "5")
+debug.print: 5: lookup 5
+debug.print: 5: remove 5
+debug.print: 5: lookup 5
+debug.print: 5: insert (5, "5")
+debug.print: 5: lookup 5
+debug.print: {node = 6; state = null; upgrades = 0}
+debug.print: 6: insert (6, "6")
+debug.print: 6: lookup 6
+debug.print: 6: remove 6
+debug.print: 6: lookup 6
+debug.print: 6: insert (6, "6")
+debug.print: 6: lookup 6
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {node = 1; state = ?(1, "1", null); upgrades = 3}
+debug.print: {node = 2; state = ?(2, "2", null); upgrades = 3}
+debug.print: {node = 3; state = ?(3, "3", null); upgrades = 2}
+debug.print: {node = 4; state = ?(4, "4", null); upgrades = 2}
+debug.print: {node = 5; state = ?(5, "5", null); upgrades = 1}
+debug.print: {node = 6; state = ?(6, "6", null); upgrades = 1}
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {node = 7; state = null; upgrades = 0}
+debug.print: 7: insert (7, "7")
+debug.print: 7: lookup 7
+debug.print: 7: remove 7
+debug.print: 7: lookup 7
+debug.print: 7: insert (7, "7")
+debug.print: 7: lookup 7
+debug.print: {node = 0; state = null; upgrades = 0}
+debug.print: 0: insert (8, "8")
+debug.print: 0: lookup 8
+debug.print: 0: remove 8
+debug.print: 0: lookup 8
+debug.print: 0: insert (8, "8")
+debug.print: 0: lookup 8
+ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/max-stack-variant.drun-run.ok b/test/run-drun/ok/max-stack-variant.drun-run.ok
index 62b1197b109..01975521fe6 100644
--- a/test/run-drun/ok/max-stack-variant.drun-run.ok
+++ b/test/run-drun/ok/max-stack-variant.drun-run.ok
@@ -1,9 +1,11 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
-debug.print: IC0502: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped: stack overflow
+debug.print: IC0502: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister trapped: stack overflow.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped
 debug.print: serialized
 ingress Completed: Reply: 0x4449444c0000
-debug.print: IC0502: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped: stack overflow
+debug.print: IC0502: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister trapped: stack overflow.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped
 debug.print: serialized
 debug.print: deserialized
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/max-stack.drun-run.ok b/test/run-drun/ok/max-stack.drun-run.ok
index 62b1197b109..01975521fe6 100644
--- a/test/run-drun/ok/max-stack.drun-run.ok
+++ b/test/run-drun/ok/max-stack.drun-run.ok
@@ -1,9 +1,11 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
-debug.print: IC0502: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped: stack overflow
+debug.print: IC0502: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister trapped: stack overflow.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped
 debug.print: serialized
 ingress Completed: Reply: 0x4449444c0000
-debug.print: IC0502: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped: stack overflow
+debug.print: IC0502: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister trapped: stack overflow.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped
 debug.print: serialized
 debug.print: deserialized
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/migrate-candid-sharing.drun.ok b/test/run-drun/ok/migrate-candid-sharing.drun.ok
new file mode 100644
index 00000000000..f2cb75352e9
--- /dev/null
+++ b/test/run-drun/ok/migrate-candid-sharing.drun.ok
@@ -0,0 +1,8 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+debug.print: INITIALIZED: 100
+ingress Completed: Reply: 0x4449444c0000
+debug.print: INITIALIZED: 100
+ingress Completed: Reply: 0x4449444c0000
+debug.print: INITIALIZED: 100
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0504: Missing upgrade option: Enhanced orthogonal persistence requires the `wasm_memory_persistence` upgrade option.
diff --git a/test/run-drun/ok/migration-paths.drun.ok b/test/run-drun/ok/migration-paths.drun.ok
new file mode 100644
index 00000000000..4c171e8e096
--- /dev/null
+++ b/test/run-drun/ok/migration-paths.drun.ok
@@ -0,0 +1,28 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+debug.print: array initialized
+debug.print: version: 1
+debug.print: Installed
+ingress Completed: Reply: 0x4449444c0000
+debug.print: version: 2
+debug.print: Upgraded (default)
+ingress Completed: Reply: 0x4449444c0000
+debug.print: version: 3
+debug.print: Upgraded (replace main memory)
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reject: Invalid upgrade option: The `wasm_memory_persistence: opt Keep` upgrade option requires that the new canister module supports enhanced orthogonal persistence.
+ingress Completed: Reply: 0x4449444c0000
+debug.print: version: 4
+debug.print: Upgraded (keep main memory)
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Graph copy
+ingress Completed: Reply: 0x4449444c0000
+debug.print: version: 5
+debug.print: Upgraded (replace main memory)
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Graph copy
+ingress Completed: Reply: 0x4449444c0000
+debug.print: version: 6
+debug.print: Upgraded (keep main memory)
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0504: Missing upgrade option: Enhanced orthogonal persistence requires the `wasm_memory_persistence` upgrade option.
diff --git a/test/run-drun/ok/more-function-stabilization.drun.ok b/test/run-drun/ok/more-function-stabilization.drun.ok
new file mode 100644
index 00000000000..3f0c0c7fc33
--- /dev/null
+++ b/test/run-drun/ok/more-function-stabilization.drun.ok
@@ -0,0 +1,10 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/more-function-upgrades.drun.ok b/test/run-drun/ok/more-function-upgrades.drun.ok
index c46182ff707..e565700968b 100644
--- a/test/run-drun/ok/more-function-upgrades.drun.ok
+++ b/test/run-drun/ok/more-function-upgrades.drun.ok
@@ -1,5 +1,7 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
 ingress Completed: Reply: 0x4449444c0000
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/non-incremental-stabilization.drun-run.ok b/test/run-drun/ok/non-incremental-stabilization.drun-run.ok
new file mode 100644
index 00000000000..be4895eb527
--- /dev/null
+++ b/test/run-drun/ok/non-incremental-stabilization.drun-run.ok
@@ -0,0 +1,29 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 12_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890
+debug.print: -2_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890
+debug.print: "Motoko graph-copy-based upgrade test"
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reject: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: No destabilization needed.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+debug.print: PRE-UPGRADE HOOK!
+debug.print: POST-UPGRADE HOOK!
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reject: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: No destabilization needed.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+debug.print: 12_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890
+debug.print: -2_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890
+debug.print: "Motoko graph-copy-based upgrade test"
+ingress Completed: Reply: 0x4449444c0000
+debug.print: PRE-UPGRADE HOOK!
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reject: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: No destabilization needed.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: Messages are blocked during stabilization.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+debug.print: POST-UPGRADE HOOK!
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 12_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890
+debug.print: -2_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890
+debug.print: "Motoko graph-copy-based upgrade test"
+ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/oneshot-callbacks.drun-run.ok b/test/run-drun/ok/oneshot-callbacks.drun-run.ok
index 0655d78547d..2b16903da1c 100644
--- a/test/run-drun/ok/oneshot-callbacks.drun-run.ok
+++ b/test/run-drun/ok/oneshot-callbacks.drun-run.ok
@@ -49,7 +49,8 @@ debug.print: ping! 1
 debug.print: go 1: 1
 debug.print: ping-async! 1
 debug.print: go 2: 1
-ingress Err: IC0502: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped: integer division by 0
+ingress Err: IC0502: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister trapped: integer division by 0.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped
 debug.print: go 0: 0
 debug.print: ping! 1
 debug.print: go 1: 1
@@ -61,4 +62,5 @@ debug.print: ping! 1
 debug.print: go 1: 1
 debug.print: ping-async! 1
 debug.print: go 2: 1
-ingress Err: IC0502: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped: integer division by 0
+ingress Err: IC0502: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister trapped: integer division by 0.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped
diff --git a/test/run-drun/ok/query-footprint-overflow.drun-run.ok b/test/run-drun/ok/query-footprint-overflow.drun-run.ok
index c12bbde1992..6ea5f745be5 100644
--- a/test/run-drun/ok/query-footprint-overflow.drun-run.ok
+++ b/test/run-drun/ok/query-footprint-overflow.drun-run.ok
@@ -1,3 +1,4 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
-ingress Completed: Reject: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: Deprecated with enhanced orthogonal persistence
+ingress Completed: Reject: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: buffer_size overflow.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/query-footprint-overflow.tc.ok b/test/run-drun/ok/query-footprint-overflow.tc.ok
index 83fd068bb6b..a366394a522 100644
--- a/test/run-drun/ok/query-footprint-overflow.tc.ok
+++ b/test/run-drun/ok/query-footprint-overflow.tc.ok
@@ -1 +1 @@
-query-footprint-overflow.mo:21.16-21.20: warning [M0194], unused identifier expl (delete or rename to wildcard `_` or `_expl`)
+query-footprint-overflow.mo:22.16-22.20: warning [M0194], unused identifier expl (delete or rename to wildcard `_` or `_expl`)
diff --git a/test/run-drun/ok/query-footprint.drun-run.ok b/test/run-drun/ok/query-footprint.drun-run.ok
index e29028fdae6..e9dfaac3f24 100644
--- a/test/run-drun/ok/query-footprint.drun-run.ok
+++ b/test/run-drun/ok/query-footprint.drun-run.ok
@@ -1,6 +1,6 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: Deprecated with enhanced orthogonal persistence
-Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: Deprecated with enhanced orthogonal persistence
-ingress Completed: Reject: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: Deprecated with enhanced orthogonal persistence
-ingress Completed: Reject: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: Deprecated with enhanced orthogonal persistence
+ingress Completed: Reply: 0x4449444c016c01c1c1cee2047801005118000000000000
+Ok: Reply: 0x4449444c016c01c1c1cee2047801005118000000000000
+ingress Completed: Reply: 0x4449444c0001785118000000000000
+ingress Completed: Reply: 0x4449444c0001785118000000000000
diff --git a/test/run-drun/ok/query-footprint.tc.ok b/test/run-drun/ok/query-footprint.tc.ok
index 32739f36f76..165902e9fc5 100644
--- a/test/run-drun/ok/query-footprint.tc.ok
+++ b/test/run-drun/ok/query-footprint.tc.ok
@@ -1,3 +1,3 @@
-query-footprint.mo:4.16-4.17: warning [M0194], unused identifier s (delete or rename to wildcard `_` or `_s`)
-query-footprint.mo:6.9-6.13: warning [M0194], unused identifier lost (delete or rename to wildcard `_` or `_lost`)
-query-footprint.mo:18.16-18.20: warning [M0194], unused identifier expl (delete or rename to wildcard `_` or `_expl`)
+query-footprint.mo:5.16-5.17: warning [M0194], unused identifier s (delete or rename to wildcard `_` or `_s`)
+query-footprint.mo:7.9-7.13: warning [M0194], unused identifier lost (delete or rename to wildcard `_` or `_lost`)
+query-footprint.mo:19.16-19.20: warning [M0194], unused identifier expl (delete or rename to wildcard `_` or `_expl`)
diff --git a/test/run-drun/ok/query-size-overflow.drun-run.ok b/test/run-drun/ok/query-size-overflow.drun-run.ok
index dacdff32217..5592415675d 100644
--- a/test/run-drun/ok/query-size-overflow.drun-run.ok
+++ b/test/run-drun/ok/query-size-overflow.drun-run.ok
@@ -1,4 +1,6 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
-Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: buffer_size overflow
-Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: buffer_size overflow
+Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: buffer_size overflow.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: buffer_size overflow.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/region-test.drun-run.ok b/test/run-drun/ok/region-test.drun-run.ok
index 52c2beae4df..e29b620dec0 100644
--- a/test/run-drun/ok/region-test.drun-run.ok
+++ b/test/run-drun/ok/region-test.drun-run.ok
@@ -9,4 +9,5 @@ debug.print: Int32
 debug.print: Int64
 debug.print: Float
 debug.print: Blob
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: Done (rolling back changes with trap)
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: Done (rolling back changes with trap).
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/region0-overflow.drun-run.ok b/test/run-drun/ok/region0-overflow.drun-run.ok
index 2048ea0a7e3..17b7b9aa288 100644
--- a/test/run-drun/ok/region0-overflow.drun-run.ok
+++ b/test/run-drun/ok/region0-overflow.drun-run.ok
@@ -1,9 +1,15 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: StableMemory offset out of bounds
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: StableMemory range out of bounds
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: StableMemory range out of bounds
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: StableMemory range out of bounds
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: StableMemory offset out of bounds
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: StableMemory offset out of bounds
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: StableMemory offset out of bounds.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: StableMemory range out of bounds.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: StableMemory range out of bounds.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: StableMemory range out of bounds.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: StableMemory offset out of bounds.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: StableMemory offset out of bounds.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/region0-rts-stats.drun-run.ok b/test/run-drun/ok/region0-rts-stats.drun-run.ok
index 9f22d7475b0..15fae421812 100644
--- a/test/run-drun/ok/region0-rts-stats.drun-run.ok
+++ b/test/run-drun/ok/region0-rts-stats.drun-run.ok
@@ -1,10 +1,13 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
-debug.print: {l1 = 6; s1 = 6}
+debug.print: Ignore Diff: (ignored)
 debug.print: {l2 = 144; s2 = 144}
 ingress Completed: Reply: 0x4449444c0000
-debug.print: {l1 = 144; s1 = 145}
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: pattern failed
-debug.print: {l1 = 144; s1 = 145}
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: pattern failed
-debug.print: {l1 = 144; s1 = 145}
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: pattern failed
+debug.print: Ignore Diff: (ignored)
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: pattern failed.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+debug.print: Ignore Diff: (ignored)
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: pattern failed.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+debug.print: Ignore Diff: (ignored)
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: pattern failed.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/region0-rts-stats.tc.ok b/test/run-drun/ok/region0-rts-stats.tc.ok
index e2aaac24ac8..14c6603b4cd 100644
--- a/test/run-drun/ok/region0-rts-stats.tc.ok
+++ b/test/run-drun/ok/region0-rts-stats.tc.ok
@@ -1,7 +1,7 @@
 stable-mem/StableMemory.mo:5.21-5.42: warning [M0199], this code is (or uses) the deprecated library `ExperimentalStableMemory`.
 Please use the `Region` library instead: https://internetcomputer.org/docs/current/motoko/main/stable-memory/stable-regions/#the-region-library or compile with flag `--experimental-stable-memory 1` to suppress this message.
-region0-rts-stats.mo:8.7-8.8: warning [M0145], this pattern of type
+region0-rts-stats.mo:10.7-10.8: warning [M0145], this pattern of type
   Nat64
 does not cover value
   1 or 2 or _
-region0-rts-stats.mo:9.14-9.15: warning [M0194], unused identifier v (delete or rename to wildcard `_` or `_v`)
+region0-rts-stats.mo:11.14-11.15: warning [M0194], unused identifier v (delete or rename to wildcard `_` or `_v`)
diff --git a/test/run-drun/ok/rts-stats2.drun-run.ok b/test/run-drun/ok/rts-stats2-classical.drun-run.ok
similarity index 100%
rename from test/run-drun/ok/rts-stats2.drun-run.ok
rename to test/run-drun/ok/rts-stats2-classical.drun-run.ok
diff --git a/test/run-drun/ok/rts-stats2-enhanced.drun-run.ok b/test/run-drun/ok/rts-stats2-enhanced.drun-run.ok
new file mode 100644
index 00000000000..6fd260403f7
--- /dev/null
+++ b/test/run-drun/ok/rts-stats2-enhanced.drun-run.ok
@@ -0,0 +1,16 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Ignore Diff: (ignored)
+debug.print: Ignore Diff: (ignored)
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Ignore Diff: (ignored)
+debug.print: Ignore Diff: (ignored)
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Ignore Diff: (ignored)
+debug.print: Ignore Diff: (ignored)
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Ignore Diff: (ignored)
+debug.print: Ignore Diff: (ignored)
+ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/runtime-info-acl.drun-run.ok b/test/run-drun/ok/runtime-info-acl.drun-run.ok
index 2dfb0fa9811..886d1085fbf 100644
--- a/test/run-drun/ok/runtime-info-acl.drun-run.ok
+++ b/test/run-drun/ok/runtime-info-acl.drun-run.ok
@@ -2,5 +2,6 @@ ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a000000000000000001
 ingress Completed: Reply: 0x4449444c0000
 debug.print: Self information okay
 debug.print: Controllee information okay
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: Unauthorized call of __motoko_runtime_information
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: Unauthorized call of __motoko_runtime_information.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/selftail.drun-run.ok b/test/run-drun/ok/selftail.drun-run.ok
index a8e082b05bf..51df7f8b742 100644
--- a/test/run-drun/ok/selftail.drun-run.ok
+++ b/test/run-drun/ok/selftail.drun-run.ok
@@ -1,4 +1,5 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 debug.print: ok1
 debug.print: ok2
-ingress Err: IC0502: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped: stack overflow
+ingress Err: IC0502: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister trapped: stack overflow.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped
diff --git a/test/run-drun/ok/send-failure-example-trap.drun-run.ok b/test/run-drun/ok/send-failure-example-trap.drun-run.ok
index ec016e3da5a..6be0e978269 100644
--- a/test/run-drun/ok/send-failure-example-trap.drun-run.ok
+++ b/test/run-drun/ok/send-failure-example-trap.drun-run.ok
@@ -1,8 +1,10 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: could not perform remote call
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: could not perform remote call.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {count = 0}
 ingress Completed: Reply: 0x4449444c00017d00
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: could not perform remote call
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: could not perform remote call.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {count = 0}
 ingress Completed: Reply: 0x4449444c00017d00
diff --git a/test/run-drun/ok/stabilization-authorization.drun-run.ok b/test/run-drun/ok/stabilization-authorization.drun-run.ok
new file mode 100644
index 00000000000..2099eaba76e
--- /dev/null
+++ b/test/run-drun/ok/stabilization-authorization.drun-run.ok
@@ -0,0 +1,17 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Test __motoko_stabilize_before_upgrade
+debug.print: IC0503: Error from Canister rrkah-fqaaa-aaaaa-aaaaq-cai: Canister called `ic0.trap` with message: not a self-call or call from controller.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+debug.print: Test __motoko_stabilize_after_upgrade
+debug.print: IC0503: Error from Canister rrkah-fqaaa-aaaaa-aaaaq-cai: Canister called `ic0.trap` with message: not a self-call or call from controller.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+debug.print: Test upgrade
+debug.print: PRE-UPGRADE HOOK!
+debug.print: Test __motoko_stabilize_before_upgrade
+debug.print: IC0503: Error from Canister rrkah-fqaaa-aaaaa-aaaaq-cai: Canister called `ic0.trap` with message: not a self-call or call from controller.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+debug.print: Test __motoko_stabilize_after_upgrade
+debug.print: IC0503: Error from Canister rrkah-fqaaa-aaaaa-aaaaq-cai: Canister called `ic0.trap` with message: not a self-call or call from controller.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/stabilization-instructions.drun-run.ok b/test/run-drun/ok/stabilization-instructions.drun-run.ok
new file mode 100644
index 00000000000..2938d96fdea
--- /dev/null
+++ b/test/run-drun/ok/stabilization-instructions.drun-run.ok
@@ -0,0 +1,15 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+debug.print: Ignore Diff: (ignored)
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Ignore Diff: (ignored)
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Ignore Diff: (ignored)
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Ignore Diff: (ignored)
+ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/stabilization-upgrade-hooks.drun.ok b/test/run-drun/ok/stabilization-upgrade-hooks.drun.ok
new file mode 100644
index 00000000000..3f85370202b
--- /dev/null
+++ b/test/run-drun/ok/stabilization-upgrade-hooks.drun.ok
@@ -0,0 +1,33 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+debug.print: init'ed
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: init'ed 0
+debug.print: postupgrade 0
+ingress Completed: Reply: 0x4449444c0000
+debug.print: a
+Ok: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: aa
+Ok: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: preupgrade 0
+ingress Completed: Reply: 0x4449444c0000
+debug.print: init'ed 1
+debug.print: postupgrade 1
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 3
+debug.print: aaa
+Ok: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 4
+debug.print: aaa
+Ok: Reply: 0x4449444c0000
+debug.print: preupgrade 1
+ingress Completed: Reply: 0x4449444c0000
+debug.print: init'ed 2
+debug.print: postupgrade 2
+ingress Completed: Reply: 0x4449444c0000
+Ok: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+Ok: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/stabilize-beyond-stable-limit.drun-run.ok b/test/run-drun/ok/stabilize-beyond-stable-limit.drun-run.ok
new file mode 100644
index 00000000000..59abbe37c68
--- /dev/null
+++ b/test/run-drun/ok/stabilize-beyond-stable-limit.drun-run.ok
@@ -0,0 +1,19 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+debug.print: growing stable memory
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 65_536
+ingress Completed: Reply: 0x4449444c0000
+debug.print: PRE-UPGRADE HOOK!
+ingress Completed: Reply: 0x4449444c0000
+debug.print: POST-UPGRADE HOOK!
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 65_536
+ingress Completed: Reply: 0x4449444c0000
+debug.print: PRE-UPGRADE HOOK!
+ingress Completed: Reply: 0x4449444c0000
+debug.print: POST-UPGRADE HOOK!
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 65_536
+ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/stabilize-beyond-stable-limit.tc.ok b/test/run-drun/ok/stabilize-beyond-stable-limit.tc.ok
new file mode 100644
index 00000000000..d660c81190b
--- /dev/null
+++ b/test/run-drun/ok/stabilize-beyond-stable-limit.tc.ok
@@ -0,0 +1,2 @@
+stabilize-beyond-stable-limit.mo:13.9-13.30: warning [M0199], this code is (or uses) the deprecated library `ExperimentalStableMemory`.
+Please use the `Region` library instead: https://internetcomputer.org/docs/current/motoko/main/stable-memory/stable-regions/#the-region-library or compile with flag `--experimental-stable-memory 1` to suppress this message.
diff --git a/test/run-drun/ok/stabilize-bignat-bigint.drun.ok b/test/run-drun/ok/stabilize-bignat-bigint.drun.ok
new file mode 100644
index 00000000000..5782de05de5
--- /dev/null
+++ b/test/run-drun/ok/stabilize-bignat-bigint.drun.ok
@@ -0,0 +1,31 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 12_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_891
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 12_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_892
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 12_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_893
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: +12_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_891
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: +12_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_889
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: +12_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_887
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/stabilize-bignums.drun-run.ok b/test/run-drun/ok/stabilize-bignums.drun-run.ok
new file mode 100644
index 00000000000..119f3e25520
--- /dev/null
+++ b/test/run-drun/ok/stabilize-bignums.drun-run.ok
@@ -0,0 +1,29 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 12_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890
+debug.print: -12_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 24_691_357_802_469_135_780_246_913_578_024_691_357_802_469_135_780_246_913_578_024_691_357_802_469_135_781
+debug.print: -24_691_357_802_469_135_780_246_913_578_024_691_357_802_469_135_780_246_913_578_024_691_357_802_469_135_781
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 49_382_715_604_938_271_560_493_827_156_049_382_715_604_938_271_560_493_827_156_049_382_715_604_938_271_563
+debug.print: -49_382_715_604_938_271_560_493_827_156_049_382_715_604_938_271_560_493_827_156_049_382_715_604_938_271_563
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 49_382_715_604_938_271_560_493_827_156_049_382_715_604_938_271_560_493_827_156_049_382_715_604_938_271_563
+debug.print: -49_382_715_604_938_271_560_493_827_156_049_382_715_604_938_271_560_493_827_156_049_382_715_604_938_271_563
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 49_382_715_604_938_271_560_493_827_156_049_382_715_604_938_271_560_493_827_156_049_382_715_604_938_271_563
+debug.print: -49_382_715_604_938_271_560_493_827_156_049_382_715_604_938_271_560_493_827_156_049_382_715_604_938_271_563
+ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/stabilize-blob-iter.drun.ok b/test/run-drun/ok/stabilize-blob-iter.drun.ok
new file mode 100644
index 00000000000..224f84257a0
--- /dev/null
+++ b/test/run-drun/ok/stabilize-blob-iter.drun.ok
@@ -0,0 +1,21 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Err: IC0537: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Attempted to execute a message, but the canister contains no Wasm module..
+Please install code to this canister before calling it. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#wasm-module-not-found
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {stableField = "Version 0"}
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {stableField = "Version 0"}
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {}
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {}
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/stabilize-boxed-numbers.drun-run.ok b/test/run-drun/ok/stabilize-boxed-numbers.drun-run.ok
new file mode 100644
index 00000000000..6b50312c8d9
--- /dev/null
+++ b/test/run-drun/ok/stabilize-boxed-numbers.drun-run.ok
@@ -0,0 +1,27 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 4_294_967_295
+debug.print: -2_147_483_648
+debug.print: 18_446_744_073_709_551_614
+debug.print: -9_223_372_036_854_775_808
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 4_294_967_295
+debug.print: -2_147_483_648
+debug.print: 18_446_744_073_709_551_614
+debug.print: -9_223_372_036_854_775_808
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 4_294_967_295
+debug.print: -2_147_483_648
+debug.print: 18_446_744_073_709_551_614
+debug.print: -9_223_372_036_854_775_808
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 4_294_967_295
+debug.print: -2_147_483_648
+debug.print: 18_446_744_073_709_551_614
+debug.print: -9_223_372_036_854_775_808
+ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/stabilize-generics.drun.ok b/test/run-drun/ok/stabilize-generics.drun.ok
new file mode 100644
index 00000000000..d748a1a6c39
--- /dev/null
+++ b/test/run-drun/ok/stabilize-generics.drun.ok
@@ -0,0 +1,29 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+debug.print: (([0], [+1]), true)
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: (([0], [+1]), true)
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: (([0], [+1]), true)
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: (([0], [+1]), true)
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: (([0], [+1]), true)
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: (([0], [+1]), true)
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: (([0], [+1]), true)
+ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/stabilize-large-array.drun-run.ok b/test/run-drun/ok/stabilize-large-array.drun-run.ok
new file mode 100644
index 00000000000..7cab37352e1
--- /dev/null
+++ b/test/run-drun/ok/stabilize-large-array.drun-run.ok
@@ -0,0 +1,11 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Array of length 100_000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: PRE-UPGRADE HOOK!
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: POST-UPGRADE HOOK!
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Array of length 100_000
+ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/stabilize-mutable-array.drun.ok b/test/run-drun/ok/stabilize-mutable-array.drun.ok
new file mode 100644
index 00000000000..99aa98c27bb
--- /dev/null
+++ b/test/run-drun/ok/stabilize-mutable-array.drun.ok
@@ -0,0 +1,14 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+debug.print: firstField=0
+debug.print: secondField=0
+debug.print: thirdField=0
+debug.print: firstField=0
+debug.print: secondField=0
+debug.print: thirdField=0
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: Messages are blocked during stabilization.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/stabilize-mutable-service.drun.ok b/test/run-drun/ok/stabilize-mutable-service.drun.ok
new file mode 100644
index 00000000000..b1142cd4271
--- /dev/null
+++ b/test/run-drun/ok/stabilize-mutable-service.drun.ok
@@ -0,0 +1,30 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: original test0
+debug.print: original test1
+debug.print: original test2
+debug.print: original test3
+debug.print: original test0
+debug.print: original test1
+debug.print: original test2
+debug.print: original test3
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: original test0
+debug.print: original test1
+debug.print: original test2
+debug.print: original test3
+debug.print: original test0
+debug.print: original test1
+debug.print: original test2
+debug.print: original test3
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: Messages are blocked during stabilization.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: Messages are blocked during stabilization.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/stabilize-mutable-variant.drun.ok b/test/run-drun/ok/stabilize-mutable-variant.drun.ok
new file mode 100644
index 00000000000..32ba42ac451
--- /dev/null
+++ b/test/run-drun/ok/stabilize-mutable-variant.drun.ok
@@ -0,0 +1,10 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+debug.print: instance=[var #Option1]
+debug.print: alias=[var #Option1]
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: Messages are blocked during stabilization.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/stabilize-mutable.drun.ok b/test/run-drun/ok/stabilize-mutable.drun.ok
new file mode 100644
index 00000000000..13c7629be32
--- /dev/null
+++ b/test/run-drun/ok/stabilize-mutable.drun.ok
@@ -0,0 +1,20 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: [var {key = 2; value = "2"}]
+debug.print: [var {key = 2; value = "2"}]
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: [var {key = 2; value = "2"}]
+debug.print: [var {key = 2; value = "2"}]
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: [var {key = 2; value = "2"}]
+debug.print: [var {key = 2; value = "2"}]
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/stabilize-nat-to-int.drun.ok b/test/run-drun/ok/stabilize-nat-to-int.drun.ok
new file mode 100644
index 00000000000..c4fc6ba9378
--- /dev/null
+++ b/test/run-drun/ok/stabilize-nat-to-int.drun.ok
@@ -0,0 +1,25 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Err: IC0537: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Attempted to execute a message, but the canister contains no Wasm module..
+Please install code to this canister before calling it. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#wasm-module-not-found
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 1
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 2
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 0
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: -2
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/stabilize-non-stable.drun.ok b/test/run-drun/ok/stabilize-non-stable.drun.ok
new file mode 100644
index 00000000000..224f84257a0
--- /dev/null
+++ b/test/run-drun/ok/stabilize-non-stable.drun.ok
@@ -0,0 +1,21 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Err: IC0537: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Attempted to execute a message, but the canister contains no Wasm module..
+Please install code to this canister before calling it. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#wasm-module-not-found
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {stableField = "Version 0"}
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {stableField = "Version 0"}
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {}
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {}
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/stabilize-optional.drun.ok b/test/run-drun/ok/stabilize-optional.drun.ok
new file mode 100644
index 00000000000..ac8ab0ae77c
--- /dev/null
+++ b/test/run-drun/ok/stabilize-optional.drun.ok
@@ -0,0 +1,26 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Err: IC0537: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Attempted to execute a message, but the canister contains no Wasm module..
+Please install code to this canister before calling it. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#wasm-module-not-found
+ingress Completed: Reply: 0x4449444c0000
+debug.print: ?(?null)
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: ?(?null)
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: ?(?null)
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: ?(?null)
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: ?(?null)
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/stabilize-primitive-types.drun-run.ok b/test/run-drun/ok/stabilize-primitive-types.drun-run.ok
new file mode 100644
index 00000000000..88d23cd7dc7
--- /dev/null
+++ b/test/run-drun/ok/stabilize-primitive-types.drun-run.ok
@@ -0,0 +1,85 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+debug.print: Initialized
+ingress Completed: Reply: 0x4449444c0000
+debug.print: null
+debug.print: false
+debug.print: 1_234_567_890_123_456_789_012_345_678_901_234_567_890
+debug.print: 123
+debug.print: 12_345
+debug.print: 1_234_567_890
+debug.print: 123_456_789_012_345
+debug.print: -1_234_567_890_123_456_789_012_345_678_901_234_567_890
+debug.print: -123
+debug.print: -12_345
+debug.print: -1_234_567_890
+debug.print: -123_456_789_012_345
+debug.print: 1.234568
+debug.print: '!'
+debug.print: ""TEST""
+debug.print: "\61\62\63"
+debug.print: un4fu-tqaaa-aaaab-qadjq-cai
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Initialized
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: null
+debug.print: true
+debug.print: 1_234_567_890_123_456_789_012_345_678_901_234_567_891
+debug.print: 124
+debug.print: 12_346
+debug.print: 1_234_567_891
+debug.print: 123_456_789_012_346
+debug.print: -1_234_567_890_123_456_789_012_345_678_901_234_567_890
+debug.print: -123
+debug.print: -12_345
+debug.print: -1_234_567_890
+debug.print: -123_456_789_012_345
+debug.print: 1.334568
+debug.print: '?'
+debug.print: ""TEST" "TEST""
+debug.print: "\61\62\63"
+debug.print: un4fu-tqaaa-aaaab-qadjq-cai
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Initialized
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: null
+debug.print: false
+debug.print: 1_234_567_890_123_456_789_012_345_678_901_234_567_892
+debug.print: 125
+debug.print: 12_347
+debug.print: 1_234_567_892
+debug.print: 123_456_789_012_347
+debug.print: -1_234_567_890_123_456_789_012_345_678_901_234_567_890
+debug.print: -123
+debug.print: -12_345
+debug.print: -1_234_567_890
+debug.print: -123_456_789_012_345
+debug.print: 1.434568
+debug.print: '!'
+debug.print: ""TEST" "TEST" "TEST""
+debug.print: "\61\62\63"
+debug.print: un4fu-tqaaa-aaaab-qadjq-cai
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Initialized
+ingress Completed: Reply: 0x4449444c0000
+debug.print: null
+debug.print: false
+debug.print: 1_234_567_890_123_456_789_012_345_678_901_234_567_892
+debug.print: 125
+debug.print: 12_347
+debug.print: 1_234_567_892
+debug.print: 123_456_789_012_347
+debug.print: -1_234_567_890_123_456_789_012_345_678_901_234_567_890
+debug.print: -123
+debug.print: -12_345
+debug.print: -1_234_567_890
+debug.print: -123_456_789_012_345
+debug.print: 1.434568
+debug.print: '!'
+debug.print: ""TEST" "TEST" "TEST""
+debug.print: "\61\62\63"
+debug.print: un4fu-tqaaa-aaaab-qadjq-cai
+ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/stabilize-recursive-type.drun.ok b/test/run-drun/ok/stabilize-recursive-type.drun.ok
new file mode 100644
index 00000000000..81634d69248
--- /dev/null
+++ b/test/run-drun/ok/stabilize-recursive-type.drun.ok
@@ -0,0 +1,24 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: CHECK 1
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: CHECK 2
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: Messages are blocked during stabilization.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: Messages are blocked during stabilization.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: Messages are blocked during stabilization.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: Messages are blocked during stabilization.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/stabilize-remove-add-fields.drun.ok b/test/run-drun/ok/stabilize-remove-add-fields.drun.ok
new file mode 100644
index 00000000000..9fc4586be34
--- /dev/null
+++ b/test/run-drun/ok/stabilize-remove-add-fields.drun.ok
@@ -0,0 +1,38 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: firstField=1
+debug.print: secondField=1
+debug.print: thirdField=1
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: firstField=2
+debug.print: secondField=2
+debug.print: thirdField=2
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: secondField=3
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: secondField=4
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: Messages are blocked during stabilization.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: Messages are blocked during stabilization.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: Messages are blocked during stabilization.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: Messages are blocked during stabilization.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/stabilize-service.drun.ok b/test/run-drun/ok/stabilize-service.drun.ok
new file mode 100644
index 00000000000..f6143445dd1
--- /dev/null
+++ b/test/run-drun/ok/stabilize-service.drun.ok
@@ -0,0 +1,25 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: original test1
+debug.print: original test2
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: original test1
+debug.print: original test2
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: reduced test1
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: reduced test1
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: Messages are blocked during stabilization.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/stabilize-text-iter.drun.ok b/test/run-drun/ok/stabilize-text-iter.drun.ok
new file mode 100644
index 00000000000..224f84257a0
--- /dev/null
+++ b/test/run-drun/ok/stabilize-text-iter.drun.ok
@@ -0,0 +1,21 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Err: IC0537: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Attempted to execute a message, but the canister contains no Wasm module..
+Please install code to this canister before calling it. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#wasm-module-not-found
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {stableField = "Version 0"}
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {stableField = "Version 0"}
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {}
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: {}
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/stabilize-to-any.drun.ok b/test/run-drun/ok/stabilize-to-any.drun.ok
new file mode 100644
index 00000000000..308aea92c20
--- /dev/null
+++ b/test/run-drun/ok/stabilize-to-any.drun.ok
@@ -0,0 +1,20 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+debug.print: Version 0
+debug.print: Initialize first variable
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Version 1
+debug.print: Initialize second variable
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Version 2
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: Version 2
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/stabilize-variant.drun.ok b/test/run-drun/ok/stabilize-variant.drun.ok
new file mode 100644
index 00000000000..9c54f0ee325
--- /dev/null
+++ b/test/run-drun/ok/stabilize-variant.drun.ok
@@ -0,0 +1,23 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: #two({key = 1; name = "TEST TEST"})
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: #two({key = 1; name = "TEST TEST TEST"})
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: #two({key = 1; name = "TEST TEST TEST TEST"})
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: #two({key = 1; name = "TEST TEST TEST TEST TEST"})
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/stable-mem-rts-stats.drun-run.ok b/test/run-drun/ok/stable-mem-rts-stats.drun-run.ok
index 79bb9d774dd..e1c59369d2d 100644
--- a/test/run-drun/ok/stable-mem-rts-stats.drun-run.ok
+++ b/test/run-drun/ok/stable-mem-rts-stats.drun-run.ok
@@ -1,10 +1,13 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
-debug.print: {l1 = 0; s1 = 0}
+debug.print: Ignore Diff: (ignored)
 debug.print: {l2 = 16; s2 = 16}
 ingress Completed: Reply: 0x4449444c0000
-debug.print: {l1 = 16; s1 = 17}
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: pattern failed
-debug.print: {l1 = 16; s1 = 17}
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: pattern failed
-debug.print: {l1 = 16; s1 = 17}
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: pattern failed
+debug.print: Ignore Diff: (ignored)
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: pattern failed.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+debug.print: Ignore Diff: (ignored)
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: pattern failed.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+debug.print: Ignore Diff: (ignored)
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: pattern failed.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/stable-mem-rts-stats.tc.ok b/test/run-drun/ok/stable-mem-rts-stats.tc.ok
index b87d82199c0..24c9117a96f 100644
--- a/test/run-drun/ok/stable-mem-rts-stats.tc.ok
+++ b/test/run-drun/ok/stable-mem-rts-stats.tc.ok
@@ -1,7 +1,7 @@
 stable-mem/StableMemory.mo:5.21-5.42: warning [M0199], this code is (or uses) the deprecated library `ExperimentalStableMemory`.
 Please use the `Region` library instead: https://internetcomputer.org/docs/current/motoko/main/stable-memory/stable-regions/#the-region-library or compile with flag `--experimental-stable-memory 1` to suppress this message.
-stable-mem-rts-stats.mo:7.7-7.8: warning [M0145], this pattern of type
+stable-mem-rts-stats.mo:9.7-9.8: warning [M0145], this pattern of type
   Nat64
 does not cover value
   1 or 2 or _
-stable-mem-rts-stats.mo:8.14-8.15: warning [M0194], unused identifier v (delete or rename to wildcard `_` or `_v`)
+stable-mem-rts-stats.mo:10.14-10.15: warning [M0194], unused identifier v (delete or rename to wildcard `_` or `_v`)
diff --git a/test/run-drun/ok/stable-memory-test.drun-run.ok b/test/run-drun/ok/stable-memory-test.drun-run.ok
index 52c2beae4df..e29b620dec0 100644
--- a/test/run-drun/ok/stable-memory-test.drun-run.ok
+++ b/test/run-drun/ok/stable-memory-test.drun-run.ok
@@ -9,4 +9,5 @@ debug.print: Int32
 debug.print: Int64
 debug.print: Float
 debug.print: Blob
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: Done (rolling back changes with trap)
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: Done (rolling back changes with trap).
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/stable-overflow.drun-run.ok b/test/run-drun/ok/stable-overflow.drun-run.ok
index 2048ea0a7e3..17b7b9aa288 100644
--- a/test/run-drun/ok/stable-overflow.drun-run.ok
+++ b/test/run-drun/ok/stable-overflow.drun-run.ok
@@ -1,9 +1,15 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: StableMemory offset out of bounds
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: StableMemory range out of bounds
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: StableMemory range out of bounds
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: StableMemory range out of bounds
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: StableMemory offset out of bounds
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: StableMemory offset out of bounds
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: StableMemory offset out of bounds.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: StableMemory range out of bounds.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: StableMemory range out of bounds.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: StableMemory range out of bounds.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: StableMemory offset out of bounds.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: StableMemory offset out of bounds.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/stable-regions-new-each-stabilization.drun-run.ok b/test/run-drun/ok/stable-regions-new-each-stabilization.drun-run.ok
new file mode 100644
index 00000000000..beb13fc4b33
--- /dev/null
+++ b/test/run-drun/ok/stable-regions-new-each-stabilization.drun-run.ok
@@ -0,0 +1,31 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+debug.print: sanity check. n=0
+ingress Completed: Reply: 0x4449444c0000
+debug.print: upgrading... calling Region.new(), n=0
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: sanity check. n=1
+debug.print:  unwrapping. i=0
+debug.print: 16
+ingress Completed: Reply: 0x4449444c0000
+debug.print: upgrading... calling Region.new(), n=1
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: sanity check. n=2
+debug.print:  unwrapping. i=0
+debug.print: 16
+debug.print:  unwrapping. i=1
+debug.print: 17
+ingress Completed: Reply: 0x4449444c0000
+debug.print: upgrading... calling Region.new(), n=2
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: sanity check. n=3
+debug.print:  unwrapping. i=0
+debug.print: 16
+debug.print:  unwrapping. i=1
+debug.print: 17
+debug.print:  unwrapping. i=2
+debug.print: 18
+ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/stable-regions-stabilization.drun-run.ok b/test/run-drun/ok/stable-regions-stabilization.drun-run.ok
new file mode 100644
index 00000000000..b1eb2a90aba
--- /dev/null
+++ b/test/run-drun/ok/stable-regions-stabilization.drun-run.ok
@@ -0,0 +1,13 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+debug.print: grow three big regions (including region0).
+debug.print: grow three big regions: done.
+ingress Completed: Reply: 0x4449444c0000
+debug.print: sanity check. n=0
+ingress Completed: Reply: 0x4449444c0000
+debug.print: upgrading... n=0
+ingress Completed: Reply: 0x4449444c0000
+debug.print: grow three big regions (including region0).
+debug.print: grow three big regions: done.
+ingress Completed: Reply: 0x4449444c0000
+debug.print: sanity check. n=1
+ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/stable-regions-stabilization.tc.ok b/test/run-drun/ok/stable-regions-stabilization.tc.ok
new file mode 100644
index 00000000000..4c1a57bfe2b
--- /dev/null
+++ b/test/run-drun/ok/stable-regions-stabilization.tc.ok
@@ -0,0 +1,2 @@
+stable-mem/StableMemory.mo:5.21-5.42: warning [M0199], this code is (or uses) the deprecated library `ExperimentalStableMemory`.
+Please use the `Region` library instead: https://internetcomputer.org/docs/current/motoko/main/stable-memory/stable-regions/#the-region-library or compile with flag `--experimental-stable-memory 1` to suppress this message.
diff --git a/test/run-drun/ok/stable-size-overflow.drun-run.ok b/test/run-drun/ok/stable-size-overflow.drun-run.ok
new file mode 100644
index 00000000000..4ffc76fe28a
--- /dev/null
+++ b/test/run-drun/ok/stable-size-overflow.drun-run.ok
@@ -0,0 +1,5 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+debug.print: upgrading...
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: buffer_size overflow.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/stable-size-overflow.tc.ok b/test/run-drun/ok/stable-size-overflow.tc.ok
index 11562f3059c..4c1a57bfe2b 100644
--- a/test/run-drun/ok/stable-size-overflow.tc.ok
+++ b/test/run-drun/ok/stable-size-overflow.tc.ok
@@ -1,3 +1,2 @@
 stable-mem/StableMemory.mo:5.21-5.42: warning [M0199], this code is (or uses) the deprecated library `ExperimentalStableMemory`.
 Please use the `Region` library instead: https://internetcomputer.org/docs/current/motoko/main/stable-memory/stable-regions/#the-region-library or compile with flag `--experimental-stable-memory 1` to suppress this message.
-stable-size-overflow.mo:12.7-12.8: warning [M0194], unused identifier a (delete or rename to wildcard `_` or `_a`)
diff --git a/test/run-drun/ok/trap-on-async-failure.drun-run.ok b/test/run-drun/ok/trap-on-async-failure.drun-run.ok
index 3ec84b5fb7d..e0970d9cd01 100644
--- a/test/run-drun/ok/trap-on-async-failure.drun-run.ok
+++ b/test/run-drun/ok/trap-on-async-failure.drun-run.ok
@@ -1,7 +1,9 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
 debug.print: test1:
-debug.print: test1: (#canister_error, "IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: could not perform self call")
+debug.print: test1: (#canister_error, "IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: could not perform self call.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly")
 debug.print: test2:
-debug.print: test2: (#canister_error, "IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: could not perform self call")
+debug.print: test2: (#canister_error, "IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: could not perform self call.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly")
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/trap-on-await-failure.drun-run.ok b/test/run-drun/ok/trap-on-await-failure.drun-run.ok
index 3ec84b5fb7d..e0970d9cd01 100644
--- a/test/run-drun/ok/trap-on-await-failure.drun-run.ok
+++ b/test/run-drun/ok/trap-on-await-failure.drun-run.ok
@@ -1,7 +1,9 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
 debug.print: test1:
-debug.print: test1: (#canister_error, "IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: could not perform self call")
+debug.print: test1: (#canister_error, "IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: could not perform self call.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly")
 debug.print: test2:
-debug.print: test2: (#canister_error, "IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: could not perform self call")
+debug.print: test2: (#canister_error, "IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: could not perform self call.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly")
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/trap-on-call-raw-failure.drun-run.ok b/test/run-drun/ok/trap-on-call-raw-failure.drun-run.ok
index d944cbd3e38..a9d8332e9e6 100644
--- a/test/run-drun/ok/trap-on-call-raw-failure.drun-run.ok
+++ b/test/run-drun/ok/trap-on-call-raw-failure.drun-run.ok
@@ -1,7 +1,9 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
 debug.print: test1:
-debug.print: test1: (#canister_error, "IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: could not perform raw call")
+debug.print: test1: (#canister_error, "IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: could not perform raw call.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly")
 debug.print: test2:
-debug.print: test2: (#canister_error, "IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: could not perform raw call")
+debug.print: test2: (#canister_error, "IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: could not perform raw call.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly")
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/trap-on-local-failure.drun-run.ok b/test/run-drun/ok/trap-on-local-failure.drun-run.ok
index 3ec84b5fb7d..e0970d9cd01 100644
--- a/test/run-drun/ok/trap-on-local-failure.drun-run.ok
+++ b/test/run-drun/ok/trap-on-local-failure.drun-run.ok
@@ -1,7 +1,9 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
 debug.print: test1:
-debug.print: test1: (#canister_error, "IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: could not perform self call")
+debug.print: test1: (#canister_error, "IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: could not perform self call.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly")
 debug.print: test2:
-debug.print: test2: (#canister_error, "IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: could not perform self call")
+debug.print: test2: (#canister_error, "IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: could not perform self call.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly")
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/trap-on-send-failure.drun-run.ok b/test/run-drun/ok/trap-on-send-failure.drun-run.ok
index f3d0b9a517c..8aa23a39118 100644
--- a/test/run-drun/ok/trap-on-send-failure.drun-run.ok
+++ b/test/run-drun/ok/trap-on-send-failure.drun-run.ok
@@ -1,7 +1,9 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
 debug.print: test1:
-debug.print: test1: (#canister_error, "IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: could not perform remote call")
+debug.print: test1: (#canister_error, "IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: could not perform remote call.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly")
 debug.print: test2:
-debug.print: test2: (#canister_error, "IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: could not perform remote call")
+debug.print: test2: (#canister_error, "IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: could not perform remote call.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly")
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/try-finally-trap-stop.drun-run.ok b/test/run-drun/ok/try-finally-trap-stop.drun-run.ok
index 4c4e1f684ba..315e50d1606 100644
--- a/test/run-drun/ok/try-finally-trap-stop.drun-run.ok
+++ b/test/run-drun/ok/try-finally-trap-stop.drun-run.ok
@@ -5,7 +5,8 @@ debug.print: trap in finally!
 debug.print: trap in finally!
 debug.print: {rts_callback_table_count = 1}
 debug.print: canister running
-debug.print: Canister rrkah-fqaaa-aaaaa-aaaaq-cai trapped explicitly: canister_pre_upgrade attempted with outstanding message callbacks (try stopping the canister before upgrade)
+debug.print: Error from Canister rrkah-fqaaa-aaaaa-aaaaq-cai: Canister called `ic0.trap` with message: canister_pre_upgrade attempted with outstanding message callbacks (try stopping the canister before upgrade).
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: canister stopped
 debug.print: canister upgraded
 debug.print: {rts_callback_table_count = 0}
diff --git a/test/run-drun/ok/try-finally-trap.drun.ok b/test/run-drun/ok/try-finally-trap.drun.ok
index 721b03b1999..80205580c08 100644
--- a/test/run-drun/ok/try-finally-trap.drun.ok
+++ b/test/run-drun/ok/try-finally-trap.drun.ok
@@ -2,13 +2,14 @@ ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a000000000000000001
 ingress Completed: Reply: 0x4449444c0000
 ingress Completed: Reply: 0x4449444c0000
 debug.print: trap in finally!
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: assertion failed at try-finally-trap.mo:8.7-8.19
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: assertion failed at try-finally-trap.mo:8.7-8.19
 
 call_on_cleanup also failed:
 
-Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: assertion failed at try-finally-trap.mo:12.7-12.19
+Canister called `ic0.trap` with message: assertion failed at try-finally-trap.mo:12.7-12.19
 debug.print: {rts_callback_table_count = 1}
 ingress Completed: Reply: 0x4449444c0000
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: canister_pre_upgrade attempted with outstanding message callbacks (try stopping the canister before upgrade)
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: canister_pre_upgrade attempted with outstanding message callbacks (try stopping the canister before upgrade).
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: {rts_callback_table_count = 1}
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/try-finally.drun-run.ok b/test/run-drun/ok/try-finally.drun-run.ok
index 38c1da2134d..6b78c6f9228 100644
--- a/test/run-drun/ok/try-finally.drun-run.ok
+++ b/test/run-drun/ok/try-finally.drun-run.ok
@@ -93,8 +93,10 @@ debug.print: InnerIN8t
 debug.print: OUT8t
 debug.print: OTHER SIDE
 debug.print: OTHER SIDE: CLEANING UP
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: assertion failed at try-finally.mo:324.17-324.29
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: assertion failed at try-finally.mo:330.25-330.41
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: assertion failed at try-finally.mo:324.17-324.29.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: assertion failed at try-finally.mo:330.25-330.41.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: OUTER CAUGHT: foo
 debug.print: BEFORE7
 debug.print: IN7
@@ -104,7 +106,9 @@ debug.print: InnerLIVESTILL7
 debug.print: InnerOUT7
 debug.print: OUT7
 debug.print: It's over
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: assertion failed at try-finally.mo:238.17-238.29
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: assertion failed at try-finally.mo:238.17-238.29.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: go2
 debug.print: It's so over
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: assertion failed at try-finally.mo:350.13-350.25
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: assertion failed at try-finally.mo:350.13-350.25.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/tuple-record-mismatch.drun.ok b/test/run-drun/ok/tuple-record-mismatch.drun.ok
index c46182ff707..e565700968b 100644
--- a/test/run-drun/ok/tuple-record-mismatch.drun.ok
+++ b/test/run-drun/ok/tuple-record-mismatch.drun.ok
@@ -1,5 +1,7 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
 ingress Completed: Reply: 0x4449444c0000
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/tuple-stabilization.drun.ok b/test/run-drun/ok/tuple-stabilization.drun.ok
new file mode 100644
index 00000000000..ba49d013d91
--- /dev/null
+++ b/test/run-drun/ok/tuple-stabilization.drun.ok
@@ -0,0 +1,50 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+debug.print: (1, 2)
+debug.print: (0, "Test", 1.230000, {key = 5; value = '_'}, [-1, +2, -3])
+debug.print: ?(2, ?(1, null, null), ?(4, ?(3, null, null), ?(5, null, null)))
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: (1, 2)
+debug.print: (0, "Test", 1.230000, {key = 5; value = '_'}, [-1, +2, -3])
+debug.print: ?(2, ?(1, null, null), ?(4, ?(3, null, null), ?(5, null, null)))
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: ()
+debug.print: (1, 2)
+debug.print: (0, "Test", 1.230000, {key = 5; value = '_'}, [-1, +2, -3])
+debug.print: ?(2, ?(1, null, null), ?(4, ?(3, null, null), ?(5, null, null)))
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: ()
+debug.print: (1, 2)
+debug.print: (0, "Test", 1.230000, {key = 5; value = '_'}, [-1, +2, -3])
+debug.print: ?(2, ?(1, null, null), ?(4, ?(3, null, null), ?(5, null, null)))
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Completed: Reply: 0x4449444c0000
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: (1, 2)
+debug.print: (0, "Test", 1.230000, {key = 5; value = '_'}, [-1, +2, -3])
+debug.print: ?(2, ?(1, null, null), ?(4, ?(3, null, null), ?(5, null, null)))
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: (1, 2)
+debug.print: (0, "Test", 1.230000, {key = 5; value = '_'}, [-1, +2, -3])
+debug.print: ?(2, ?(1, null, null), ?(4, ?(3, null, null), ?(5, null, null)))
+ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/tuple-upgrades.drun.ok b/test/run-drun/ok/tuple-upgrades.drun.ok
index b154425ced1..53462fc28d1 100644
--- a/test/run-drun/ok/tuple-upgrades.drun.ok
+++ b/test/run-drun/ok/tuple-upgrades.drun.ok
@@ -21,10 +21,14 @@ debug.print: (1, 2)
 debug.print: (0, "Test", 1.230000, {key = 5; value = '_'}, [-1, +2, -3])
 debug.print: ?(2, ?(1, null, null), ?(4, ?(3, null, null), ?(5, null, null)))
 ingress Completed: Reply: 0x4449444c0000
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 ingress Completed: Reply: 0x4449444c0000
 debug.print: (1, 2)
 debug.print: (0, "Test", 1.230000, {key = 5; value = '_'}, [-1, +2, -3])
diff --git a/test/run-drun/ok/upgrade-bignat-bigint.drun.ok b/test/run-drun/ok/upgrade-bignat-bigint.drun.ok
index be20fcc0751..7c4347a98de 100644
--- a/test/run-drun/ok/upgrade-bignat-bigint.drun.ok
+++ b/test/run-drun/ok/upgrade-bignat-bigint.drun.ok
@@ -15,4 +15,5 @@ ingress Completed: Reply: 0x4449444c0000
 ingress Completed: Reply: 0x4449444c0000
 debug.print: +12_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_888
 ingress Completed: Reply: 0x4449444c0000
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/upgrade-bignums.drun-run.ok b/test/run-drun/ok/upgrade-bignums.drun-run.ok
new file mode 100644
index 00000000000..c2ad671724d
--- /dev/null
+++ b/test/run-drun/ok/upgrade-bignums.drun-run.ok
@@ -0,0 +1,15 @@
+ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 12_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890
+debug.print: -12_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890_123_456_789_012_345_678_901_234_567_890
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 24_691_357_802_469_135_780_246_913_578_024_691_357_802_469_135_780_246_913_578_024_691_357_802_469_135_781
+debug.print: -24_691_357_802_469_135_780_246_913_578_024_691_357_802_469_135_780_246_913_578_024_691_357_802_469_135_781
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+ingress Completed: Reply: 0x4449444c0000
+debug.print: 49_382_715_604_938_271_560_493_827_156_049_382_715_604_938_271_560_493_827_156_049_382_715_604_938_271_563
+debug.print: -49_382_715_604_938_271_560_493_827_156_049_382_715_604_938_271_560_493_827_156_049_382_715_604_938_271_563
+ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/upgrade-generics.drun.ok b/test/run-drun/ok/upgrade-generics.drun.ok
index 136ec2202df..a0fb0b59e0b 100644
--- a/test/run-drun/ok/upgrade-generics.drun.ok
+++ b/test/run-drun/ok/upgrade-generics.drun.ok
@@ -11,7 +11,8 @@ ingress Completed: Reply: 0x4449444c0000
 ingress Completed: Reply: 0x4449444c0000
 debug.print: (([0], [+1]), true)
 ingress Completed: Reply: 0x4449444c0000
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 ingress Completed: Reply: 0x4449444c0000
 debug.print: (([0], [+1]), true)
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/upgrade-mutable-array.drun.ok b/test/run-drun/ok/upgrade-mutable-array.drun.ok
index 4c7859d1354..5bcd7316c48 100644
--- a/test/run-drun/ok/upgrade-mutable-array.drun.ok
+++ b/test/run-drun/ok/upgrade-mutable-array.drun.ok
@@ -7,7 +7,8 @@ debug.print: firstField=0
 debug.print: secondField=0
 debug.print: thirdField=0
 ingress Completed: Reply: 0x4449444c0000
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: firstField=0
 debug.print: secondField=0
 debug.print: thirdField=0
diff --git a/test/run-drun/ok/upgrade-mutable-service.drun.ok b/test/run-drun/ok/upgrade-mutable-service.drun.ok
index 45933d721fe..2bcb96f77bf 100644
--- a/test/run-drun/ok/upgrade-mutable-service.drun.ok
+++ b/test/run-drun/ok/upgrade-mutable-service.drun.ok
@@ -20,7 +20,8 @@ debug.print: original test1
 debug.print: original test2
 debug.print: original test3
 ingress Completed: Reply: 0x4449444c0000
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 ingress Completed: Reply: 0x4449444c0000
 debug.print: original test0
 debug.print: original test1
diff --git a/test/run-drun/ok/upgrade-mutable-variant.drun.ok b/test/run-drun/ok/upgrade-mutable-variant.drun.ok
index 09d407cb897..b59108bd8c2 100644
--- a/test/run-drun/ok/upgrade-mutable-variant.drun.ok
+++ b/test/run-drun/ok/upgrade-mutable-variant.drun.ok
@@ -3,7 +3,8 @@ ingress Completed: Reply: 0x4449444c0000
 debug.print: instance=[var #Option1]
 debug.print: alias=[var #Option1]
 ingress Completed: Reply: 0x4449444c0000
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: instance=[var #Option1]
 debug.print: alias=[var #Option1]
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/upgrade-mutable.drun.ok b/test/run-drun/ok/upgrade-mutable.drun.ok
index 160a60b06d0..ef90288ad93 100644
--- a/test/run-drun/ok/upgrade-mutable.drun.ok
+++ b/test/run-drun/ok/upgrade-mutable.drun.ok
@@ -9,4 +9,5 @@ ingress Completed: Reply: 0x4449444c0000
 debug.print: [var {key = 2; value = "2"}]
 debug.print: [var {key = 2; value = "2"}]
 ingress Completed: Reply: 0x4449444c0000
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/upgrade-nat-to-int.drun.ok b/test/run-drun/ok/upgrade-nat-to-int.drun.ok
index 481c689f56d..f5d7ac918e7 100644
--- a/test/run-drun/ok/upgrade-nat-to-int.drun.ok
+++ b/test/run-drun/ok/upgrade-nat-to-int.drun.ok
@@ -15,4 +15,5 @@ ingress Completed: Reply: 0x4449444c0000
 ingress Completed: Reply: 0x4449444c0000
 debug.print: -2
 ingress Completed: Reply: 0x4449444c0000
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/upgrade-non-stable.drun.ok b/test/run-drun/ok/upgrade-non-stable.drun.ok
index ce9825f8ad1..26f680be00a 100644
--- a/test/run-drun/ok/upgrade-non-stable.drun.ok
+++ b/test/run-drun/ok/upgrade-non-stable.drun.ok
@@ -11,4 +11,5 @@ ingress Completed: Reply: 0x4449444c0000
 ingress Completed: Reply: 0x4449444c0000
 debug.print: {}
 ingress Completed: Reply: 0x4449444c0000
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/upgrade-recursive-type.drun.ok b/test/run-drun/ok/upgrade-recursive-type.drun.ok
index c1bfca7bfc9..f577925ce09 100644
--- a/test/run-drun/ok/upgrade-recursive-type.drun.ok
+++ b/test/run-drun/ok/upgrade-recursive-type.drun.ok
@@ -7,11 +7,13 @@ ingress Completed: Reply: 0x4449444c0000
 ingress Completed: Reply: 0x4449444c0000
 debug.print: CHECK 2
 ingress Completed: Reply: 0x4449444c0000
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 ingress Completed: Reply: 0x4449444c0000
 debug.print: CHECK 3
 ingress Completed: Reply: 0x4449444c0000
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 ingress Completed: Reply: 0x4449444c0000
 debug.print: CHECK 4
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/upgrade-remove-add-fields.drun.ok b/test/run-drun/ok/upgrade-remove-add-fields.drun.ok
index e95999ccf86..21a27f7116b 100644
--- a/test/run-drun/ok/upgrade-remove-add-fields.drun.ok
+++ b/test/run-drun/ok/upgrade-remove-add-fields.drun.ok
@@ -19,11 +19,13 @@ ingress Completed: Reply: 0x4449444c0000
 ingress Completed: Reply: 0x4449444c0000
 debug.print: secondField=4
 ingress Completed: Reply: 0x4449444c0000
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 ingress Completed: Reply: 0x4449444c0000
 debug.print: secondField=5
 ingress Completed: Reply: 0x4449444c0000
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 ingress Completed: Reply: 0x4449444c0000
 debug.print: secondField=6
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/upgrade-service.drun.ok b/test/run-drun/ok/upgrade-service.drun.ok
index 9706bca1b6f..263e8d433d8 100644
--- a/test/run-drun/ok/upgrade-service.drun.ok
+++ b/test/run-drun/ok/upgrade-service.drun.ok
@@ -15,6 +15,7 @@ ingress Completed: Reply: 0x4449444c0000
 ingress Completed: Reply: 0x4449444c0000
 debug.print: reduced test1
 ingress Completed: Reply: 0x4449444c0000
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 debug.print: reduced test1
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/ok/variant-upgrades.drun.ok b/test/run-drun/ok/variant-upgrades.drun.ok
index cd317ed4b8d..7ed13fc3fa3 100644
--- a/test/run-drun/ok/variant-upgrades.drun.ok
+++ b/test/run-drun/ok/variant-upgrades.drun.ok
@@ -15,5 +15,7 @@ ingress Completed: Reply: 0x4449444c0000
 ingress Completed: Reply: 0x4449444c0000
 debug.print: #two({key = 1; name = "TEST TEST TEST TEST TEST"})
 ingress Completed: Reply: 0x4449444c0000
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
-ingress Err: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: RTS error: Memory-incompatible program upgrade
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
+ingress Err: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: RTS error: Memory-incompatible program upgrade.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
diff --git a/test/run-drun/ok/vec-any-bomb.drun-run.ok b/test/run-drun/ok/vec-any-bomb.drun-run.ok
index 9b1905ecaaa..83ddcd511ed 100644
--- a/test/run-drun/ok/vec-any-bomb.drun-run.ok
+++ b/test/run-drun/ok/vec-any-bomb.drun-run.ok
@@ -1,4 +1,5 @@
 ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101
 ingress Completed: Reply: 0x4449444c0000
-debug.print: IC0503: Canister rwlgt-iiaaa-aaaaa-aaaaa-cai trapped explicitly: IDL error: exceeded value limit
+debug.print: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister called `ic0.trap` with message: IDL error: exceeded value limit.
+Consider gracefully handling failures from this canister or altering the canister to handle exceptions. See documentation: http://internetcomputer.org/docs/current/references/execution-errors#trapped-explicitly
 ingress Completed: Reply: 0x4449444c0000
diff --git a/test/run-drun/oneshot-callbacks.mo b/test/run-drun/oneshot-callbacks.mo
index b75366aab04..20e9a149717 100644
--- a/test/run-drun/oneshot-callbacks.mo
+++ b/test/run-drun/oneshot-callbacks.mo
@@ -1,3 +1,4 @@
+//MOC-NO-FORCE-GC
 import Prim "mo:⛔";
 
 actor a {
diff --git a/test/run-drun/query-footprint-overflow.mo b/test/run-drun/query-footprint-overflow.mo
index 195c5f19def..9f8017eee1a 100644
--- a/test/run-drun/query-footprint-overflow.mo
+++ b/test/run-drun/query-footprint-overflow.mo
@@ -1,3 +1,4 @@
+//CLASSICAL-PERSISTENCE-ONLY
 import { Array_tabulate; stableVarQuery } = "mo:⛔"
 
 actor footprint = {
diff --git a/test/run-drun/query-footprint.mo b/test/run-drun/query-footprint.mo
index 9836dd4a7b5..0331b086f4f 100644
--- a/test/run-drun/query-footprint.mo
+++ b/test/run-drun/query-footprint.mo
@@ -1,3 +1,4 @@
+//CLASSICAL-PERSISTENCE-ONLY
 import { stableVarQuery } "mo:⛔";
 
 actor footprint = {
diff --git a/test/run-drun/region0-rts-stats.mo b/test/run-drun/region0-rts-stats.mo
index 8224f9123c8..a61d07291fc 100644
--- a/test/run-drun/region0-rts-stats.mo
+++ b/test/run-drun/region0-rts-stats.mo
@@ -4,7 +4,9 @@ import StableMemory "stable-mem/StableMemory";
 actor {
   let s1 = P.rts_stable_memory_size();
   let l1 = P.rts_logical_stable_memory_size();
-  P.debugPrint (debug_show({s1;l1}));
+  assert (s1 == 6 or s1 == 145 or s1 == 146);
+  assert (l1 == 6 or l1 == 144);
+  P.debugPrint ("Ignore Diff: "# debug_show({s1;l1}));
   let 0 = StableMemory.grow(16);
   stable var v = StableMemory.loadBlob(0, 65536);
   let s2 = P.rts_stable_memory_size();
diff --git a/test/run-drun/rts-stats.mo b/test/run-drun/rts-stats.mo
index 3290a5654c8..57d7a0cb891 100644
--- a/test/run-drun/rts-stats.mo
+++ b/test/run-drun/rts-stats.mo
@@ -9,9 +9,9 @@ let a1 = Prim.rts_total_allocation();
 // Prim.debugPrint("Size and allocation before: " # debug_show (s0, a0));
 // Prim.debugPrint("Size and allocation after:  " # debug_show (s1, a1));
 
-// Differences between incremental and non-incremental GC (additional forwarding header field)
-assert (+s1-s0 == 10008 or +s1-s0 == 10012);
-assert (+a1-a0 == 10008 or +a1-a0 == 10012);
+// Differences between classical persistence (incremental and non-incremental GC) and enhanced orthogonal persistence:
+assert (+s1-s0 == 10008 or +s1-s0 == 10012 or +s1-s0 == 20024);
+assert (+a1-a0 == 10008 or +a1-a0 == 10012 or +a1-a0 == 20024);
 
 assert(Prim.rts_memory_size() > Prim.rts_heap_size());
 
diff --git a/test/run-drun/rts-stats2.mo b/test/run-drun/rts-stats2-classical.mo
similarity index 96%
rename from test/run-drun/rts-stats2.mo
rename to test/run-drun/rts-stats2-classical.mo
index 6c6c9922918..d6d2829ba21 100644
--- a/test/run-drun/rts-stats2.mo
+++ b/test/run-drun/rts-stats2-classical.mo
@@ -1,3 +1,4 @@
+//CLASSICAL-PERSISTENCE-ONLY
 import Prim "mo:⛔";
 actor a {
   func runGC(): async() {
@@ -51,4 +52,4 @@ actor a {
 //CALL ingress check_A "DIDL\x00\x00"
 //CALL ingress bar "DIDL\x00\x00"
 //CALL ingress check_B "DIDL\x00\x00"
-//CALL ingress check_B "DIDL\x00\x00"
+//CALL ingress check_B "DIDL\x00\x00"
\ No newline at end of file
diff --git a/test/run-drun/rts-stats2-enhanced.mo b/test/run-drun/rts-stats2-enhanced.mo
new file mode 100644
index 00000000000..719a291e3f8
--- /dev/null
+++ b/test/run-drun/rts-stats2-enhanced.mo
@@ -0,0 +1,55 @@
+//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+import Prim "mo:⛔";
+actor a {
+  func runGC(): async() {
+    var count = 0;
+    // run multiple GC increments for the incremental GC
+    while (count < 3) {
+      await async();
+      count += 1;
+    }
+  };
+
+  let length = 8 * 1024 * 1024; 
+  public func foo(): async() {
+    ignore(Prim.Array_init<()>(length, ())); 
+    await runGC();
+  };
+  public func check_A() {
+    Prim.debugPrint("Ignore Diff: Reclaimed: " # debug_show Prim.rts_reclaimed());
+    assert (Prim.rts_reclaimed() > 8 * length);
+    // Generational GC has additional remembered set that is discarded on each GC run
+    // Debug mode for generational GC also produces additional memory snapshots for sanity checks
+    assert (Prim.rts_reclaimed() < 10 * length);
+
+    Prim.debugPrint("Ignore Diff: Live size: " # debug_show Prim.rts_max_live_size());
+    assert (Prim.rts_max_live_size() < 500_000);
+  };
+  flexible var v : [var ()] = [var];
+  public func bar(): async() {
+    v := Prim.Array_init<()>(length, ()); // larger amount to trigger incremental GC
+    await runGC();
+  };
+  public func check_B() {
+    Prim.debugPrint("Ignore Diff: Reclaimed: " # debug_show Prim.rts_reclaimed());
+    assert (Prim.rts_reclaimed() > 8 * length);
+    // Generational GC has additional remembered set that is discarded on each GC run
+    // Debug mode for generational GC also produces additional memory snapshots for sanity checks
+    assert (Prim.rts_reclaimed() < 16 * length);
+
+    Prim.debugPrint("Ignore Diff: Live size: " # debug_show Prim.rts_max_live_size());
+    assert (Prim.rts_max_live_size() > 8 * length);
+    assert (Prim.rts_max_live_size() < 10 * length);
+  };
+}
+// no point running these in the interpreter
+//SKIP run
+//SKIP run-low
+//SKIP run-ir
+
+//CALL ingress foo "DIDL\x00\x00"
+//CALL ingress check_A "DIDL\x00\x00"
+//CALL ingress check_A "DIDL\x00\x00"
+//CALL ingress bar "DIDL\x00\x00"
+//CALL ingress check_B "DIDL\x00\x00"
+//CALL ingress check_B "DIDL\x00\x00"
diff --git a/test/run-drun/runtime-info.mo b/test/run-drun/runtime-info.mo
index 4fcf8c4f389..eb139d8da83 100644
--- a/test/run-drun/runtime-info.mo
+++ b/test/run-drun/runtime-info.mo
@@ -10,7 +10,7 @@ actor Self {
     };
 
     func validGC(strategy : Text) : Bool {
-        for (name in ["copying", "compacting", "generational", "incremental"].vals()) {
+        for (name in ["copying", "compacting", "generational", "incremental", "default"].vals()) {
             if (strategy == name # " force") {
                 return true;
             };
diff --git a/test/run-drun/stabilization-authorization.mo b/test/run-drun/stabilization-authorization.mo
new file mode 100644
index 00000000000..f63a46c2e26
--- /dev/null
+++ b/test/run-drun/stabilization-authorization.mo
@@ -0,0 +1,43 @@
+//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+//MOC-FLAG --stabilization-instruction-limit=10000
+
+import Prim "mo:⛔";
+import Cycles = "cycles/cycles";
+import UpgradeTarget "stabilization-authorization/upgrade-target";
+import AccessTester "stabilization-authorization/access-tester";
+
+actor a {
+  type IncrementalStabilization = actor {
+    __motoko_stabilize_before_upgrade : () -> async ();
+    __motoko_destabilize_after_upgrade : () -> async ();
+  };
+
+  func useIncrementalStabilization(a : actor {}) : IncrementalStabilization {
+    actor (debug_show (Prim.principalOfActor(a))) : IncrementalStabilization;
+  };
+
+  public func run() : async () {
+    if (Cycles.balance() == 0) {
+      await Cycles.provisional_top_up_actor(a, 100_000_000_000_000);
+    };
+
+    Cycles.add(2_000_000_000_000);
+    let upgradeTarget = await UpgradeTarget.UpgradeTarget();
+    let testStabilization = useIncrementalStabilization(upgradeTarget);
+
+    Cycles.add(2_000_000_000_000);
+    let accessTester = await AccessTester.AccessTester(testStabilization);
+    await accessTester.test();
+
+    Prim.debugPrint("Test upgrade");
+    await testStabilization.__motoko_stabilize_before_upgrade();
+    ignore await (system UpgradeTarget.UpgradeTarget)(#upgrade upgradeTarget)();
+
+    await accessTester.test();
+  };
+};
+
+//CALL ingress run "DIDL\x00\x00"
+//SKIP run
+//SKIP run-ir
+//SKIP run-low
diff --git a/test/run-drun/stabilization-authorization/access-tester.mo b/test/run-drun/stabilization-authorization/access-tester.mo
new file mode 100644
index 00000000000..d74d7671d34
--- /dev/null
+++ b/test/run-drun/stabilization-authorization/access-tester.mo
@@ -0,0 +1,23 @@
+import Prim "mo:⛔";
+
+actor class AccessTester(
+    other : actor {
+        __motoko_stabilize_before_upgrade : () -> async ();
+        __motoko_destabilize_after_upgrade : () -> async ();
+    }
+) {
+    public func test() : async () {
+        Prim.debugPrint("Test __motoko_stabilize_before_upgrade");
+        try {
+            await other.__motoko_stabilize_before_upgrade();
+        } catch (e) {
+            Prim.debugPrint(Prim.errorMessage(e));
+        };
+        try {
+            Prim.debugPrint("Test __motoko_stabilize_after_upgrade");
+            await other.__motoko_destabilize_after_upgrade();
+        } catch (e) {
+            Prim.debugPrint(Prim.errorMessage(e));
+        };
+    };
+};
diff --git a/test/run-drun/stabilization-authorization/upgrade-target.mo b/test/run-drun/stabilization-authorization/upgrade-target.mo
new file mode 100644
index 00000000000..ab617d92a52
--- /dev/null
+++ b/test/run-drun/stabilization-authorization/upgrade-target.mo
@@ -0,0 +1,13 @@
+import Prim "mo:⛔";
+
+actor class UpgradeTarget() {
+  stable var _stableArray = Prim.Array_tabulate(100_000, func(index) { index });
+
+  system func preupgrade() {
+    Prim.debugPrint("PRE-UPGRADE HOOK!");
+  };
+
+  system func postupgrade() {
+    Prim.debugPrint("POST-UPGRADE HOOK!");
+  };
+};
diff --git a/test/run-drun/stabilization-instructions.mo b/test/run-drun/stabilization-instructions.mo
new file mode 100644
index 00000000000..b028ac44f16
--- /dev/null
+++ b/test/run-drun/stabilization-instructions.mo
@@ -0,0 +1,44 @@
+//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+import Prim "mo:prim";
+
+actor {
+  stable var lastInstructions = 0;
+  stable var length = 0;
+  stable var stableArray : [Nat] = [];
+
+  if (length > 0) {
+    assert (Prim.rts_upgrade_instructions() >= lastInstructions);
+    lastInstructions := Prim.rts_upgrade_instructions();
+    assert(lastInstructions > 0);
+  };
+  Prim.debugPrint("Ignore Diff: Upgrade instructions: " # debug_show (Prim.rts_upgrade_instructions()));
+
+  public func increase() : async () {
+    if (length == 0) {
+      length := 1_000;
+    } else {
+      length *= 10;
+    };
+    let newArray = Prim.Array_tabulate(
+      length,
+      func(index) {
+        index * index;
+      },
+    );
+    stableArray := newArray;
+  };
+};
+
+//SKIP run
+//SKIP run-ir
+//SKIP run-low
+
+//CALL ingress increase "DIDL\x00\x00"
+//CALL ingress __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+//CALL upgrade ""
+//CALL ingress increase "DIDL\x00\x00"
+//CALL ingress __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+//CALL upgrade ""
+//CALL ingress increase "DIDL\x00\x00"
+//CALL ingress __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+//CALL upgrade ""
diff --git a/test/run-drun/stabilization-upgrade-hooks.drun b/test/run-drun/stabilization-upgrade-hooks.drun
new file mode 100644
index 00000000000..498d17c7543
--- /dev/null
+++ b/test/run-drun/stabilization-upgrade-hooks.drun
@@ -0,0 +1,18 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+install $ID stabilization-upgrade-hooks/upgrade-hook.mo ""
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilization-upgrade-hooks/upgrade-hook0.mo ""
+query $ID check "DIDL\x00\x01\x7d\x01"
+ingress $ID inc "DIDL\x00\x00"
+query $ID check "DIDL\x00\x01\x7d\x02"
+ingress $ID inc "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilization-upgrade-hooks/upgrade-hook1.mo ""
+query $ID check "DIDL\x00\x01\x7d\x03"
+ingress $ID inc "DIDL\x00\x00"
+query $ID check "DIDL\x00\x01\x7d\x04"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilization-upgrade-hooks/upgrade-hook2.mo ""
+query $ID check "DIDL\x00\x01\x7d\x04"
+ingress $ID inc "DIDL\x00\x00"
+query $ID check "DIDL\x00\x01\x7d\x05"
diff --git a/test/run-drun/stabilization-upgrade-hooks/upgrade-hook.mo b/test/run-drun/stabilization-upgrade-hooks/upgrade-hook.mo
new file mode 100644
index 00000000000..9ac1aee64a2
--- /dev/null
+++ b/test/run-drun/stabilization-upgrade-hooks/upgrade-hook.mo
@@ -0,0 +1,4 @@
+import Prim "mo:⛔";
+actor {
+  Prim.debugPrint ("init'ed");
+}
diff --git a/test/run-drun/stabilization-upgrade-hooks/upgrade-hook0.mo b/test/run-drun/stabilization-upgrade-hooks/upgrade-hook0.mo
new file mode 100644
index 00000000000..288c2b0ced6
--- /dev/null
+++ b/test/run-drun/stabilization-upgrade-hooks/upgrade-hook0.mo
@@ -0,0 +1,20 @@
+import Prim "mo:⛔";
+actor {
+  Prim.debugPrint ("init'ed 0");
+  stable var c = "a";
+  var d = c; // unstable cached state
+  public func inc() { d #= "a"; };
+  public query func check(n : Int) : async () {
+    Prim.debugPrint(d);
+    assert (d.size() == n);
+  };
+  system func preupgrade(){
+    Prim.debugPrint("preupgrade 0");
+    c := d;
+  };
+  system func postupgrade(){
+    Prim.debugPrint("postupgrade 0");
+    d := c;
+  };
+}
+
diff --git a/test/run-drun/stabilization-upgrade-hooks/upgrade-hook1.mo b/test/run-drun/stabilization-upgrade-hooks/upgrade-hook1.mo
new file mode 100644
index 00000000000..b930003ed11
--- /dev/null
+++ b/test/run-drun/stabilization-upgrade-hooks/upgrade-hook1.mo
@@ -0,0 +1,25 @@
+import Prim "mo:⛔";
+actor {
+  Prim.debugPrint ("init'ed 1");
+  stable let c = "a";
+  stable var i : Nat = c.size();
+  var j = 0; // unstable cached state
+  public func inc() { j += 1; };
+  public query func check(n : Int) : async () {
+    Prim.debugPrintNat(j);
+    Prim.debugPrint(c);
+    assert (j == n);
+    assert (c.size() == 3);
+    assert (c.size() <= j);
+  };
+  system func preupgrade(){
+    Prim.debugPrint("preupgrade 1");
+    i := j; // save cache
+  };
+  system func postupgrade(){
+    j := i; // restore cache
+    Prim.debugPrint("postupgrade 1");
+  };
+
+}
+
diff --git a/test/run-drun/stabilization-upgrade-hooks/upgrade-hook2.mo b/test/run-drun/stabilization-upgrade-hooks/upgrade-hook2.mo
new file mode 100644
index 00000000000..980b5ae35a9
--- /dev/null
+++ b/test/run-drun/stabilization-upgrade-hooks/upgrade-hook2.mo
@@ -0,0 +1,21 @@
+import Prim "mo:⛔";
+actor {
+  Prim.debugPrint ("init'ed 2");
+  stable let c : Text = do { assert false; loop {}};
+  stable var i : Nat = do { assert false; loop {}};
+  var j = i; // cached state
+  public func inc() { j += 1; };
+  public query func check(n : Int) : async () {
+    assert (c.size() == 3);
+    assert (j == n);
+  };
+  system func preupgrade(){
+    Prim.debugPrint("preupgrade 2");
+    i := j; // save cache
+  };
+  system func postupgrade(){
+    Prim.debugPrint("postupgrade 2");
+    j := i; // restore cache
+  };
+
+}
diff --git a/test/run-drun/stabilize-beyond-stable-limit.mo b/test/run-drun/stabilize-beyond-stable-limit.mo
new file mode 100644
index 00000000000..a6eb9b42cdc
--- /dev/null
+++ b/test/run-drun/stabilize-beyond-stable-limit.mo
@@ -0,0 +1,46 @@
+//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+//MOC-FLAG --stabilization-instruction-limit=100000 --max-stable-pages 16
+
+// This test fails because the stabilization code is using virtual stablemem_grow, which caps growth to max-stable-pages (default is 65536, but lowered below). It should be using physical ic0_stable64_grow (and _size) instead.
+
+// Unfixed, the bug would prevent a user relying on defaults from serializing more than 4GB of heap data, even in 64-bit mode.
+
+import Prim "mo:prim";
+
+actor {
+
+    let pages : Nat64 = 16;
+    if (Prim.stableMemorySize() == 0) {
+      Prim.debugPrint("growing stable memory");
+      ignore Prim.stableMemoryGrow(pages);
+    };
+    assert Prim.stableMemorySize() == pages;
+    stable let blob = Prim.stableMemoryLoadBlob(0,65536);
+
+    public func check() : async () {
+        Prim.debugPrint(debug_show (blob.size()))
+    };
+
+    system func preupgrade() {
+        Prim.debugPrint("PRE-UPGRADE HOOK!");
+    };
+
+    system func postupgrade() {
+        Prim.debugPrint("POST-UPGRADE HOOK!");
+    };
+};
+
+//CALL ingress check "DIDL\x00\x00"
+//CALL ingress __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+//CALL upgrade ""
+//CALL ingress __motoko_destabilize_after_upgrade "DIDL\x00\x00"
+//CALL ingress check "DIDL\x00\x00"
+//CALL ingress __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+//CALL upgrade ""
+//CALL ingress __motoko_destabilize_after_upgrade "DIDL\x00\x00"
+//CALL ingress check "DIDL\x00\x00"
+
+//SKIP run
+//SKIP run-ir
+//SKIP run-low
+
diff --git a/test/run-drun/stabilize-bignat-bigint.drun b/test/run-drun/stabilize-bignat-bigint.drun
new file mode 100644
index 00000000000..5cc7dda54b6
--- /dev/null
+++ b/test/run-drun/stabilize-bignat-bigint.drun
@@ -0,0 +1,31 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+# SKIP ic-ref-run
+install $ID stabilize-bignat-bigint/version0.mo ""
+ingress $ID modify "DIDL\x00\x00"
+ingress $ID print "DIDL\x00\x00"
+# Enhanced Orthogonal Persistence
+upgrade $ID stabilize-bignat-bigint/version0.mo ""
+ingress $ID modify "DIDL\x00\x00"
+ingress $ID print "DIDL\x00\x00"
+# Graph-Copy-Based Stabilization
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-bignat-bigint/version0.mo ""
+ingress $ID modify "DIDL\x00\x00"
+ingress $ID print "DIDL\x00\x00"
+# Graph-Copy-Based Stabilization
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-bignat-bigint/version1.mo ""
+ingress $ID modify "DIDL\x00\x00"
+ingress $ID print "DIDL\x00\x00"
+# Graph-Copy-Based Stabilization
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-bignat-bigint/version1.mo ""
+ingress $ID modify "DIDL\x00\x00"
+ingress $ID print "DIDL\x00\x00"
+# Enhanced Orthogonal Persistence
+upgrade $ID stabilize-bignat-bigint/version1.mo ""
+ingress $ID modify "DIDL\x00\x00"
+ingress $ID print "DIDL\x00\x00"
+# Graph-Copy-Based Stabilization
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-bignat-bigint/version0.mo ""
diff --git a/test/run-drun/stabilize-bignat-bigint/version0.mo b/test/run-drun/stabilize-bignat-bigint/version0.mo
new file mode 100644
index 00000000000..a8ad79fdf36
--- /dev/null
+++ b/test/run-drun/stabilize-bignat-bigint/version0.mo
@@ -0,0 +1,13 @@
+import Prim "mo:prim";
+
+actor {
+   stable var number : Nat = 12345678901234567890123456789012345678901234567890123456789012345678901234567890;
+
+   public func modify() : async () {
+      number += 1;
+   };
+
+   public func print() : async () {
+      Prim.debugPrint(debug_show (number));
+   };
+};
diff --git a/test/run-drun/stabilize-bignat-bigint/version1.mo b/test/run-drun/stabilize-bignat-bigint/version1.mo
new file mode 100644
index 00000000000..c7afbe089ff
--- /dev/null
+++ b/test/run-drun/stabilize-bignat-bigint/version1.mo
@@ -0,0 +1,13 @@
+import Prim "mo:prim";
+
+actor {
+   stable var number : Int = -1;
+
+   public func modify() : async () {
+      number -= 2;
+   };
+
+   public func print() : async () {
+      Prim.debugPrint(debug_show (number));
+   };
+};
diff --git a/test/run-drun/stabilize-bignums.mo b/test/run-drun/stabilize-bignums.mo
new file mode 100644
index 00000000000..266d96468cd
--- /dev/null
+++ b/test/run-drun/stabilize-bignums.mo
@@ -0,0 +1,36 @@
+//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+//MOC-FLAG --stabilization-instruction-limit=10000
+import Prim "mo:prim";
+
+actor {
+    stable var unsigned : Nat = 12345678901234567890123456789012345678901234567890123456789012345678901234567890;
+    stable var signed : Int = -12345678901234567890123456789012345678901234567890123456789012345678901234567890;
+
+    public func modify() : async () {
+        unsigned := 2 * unsigned + 1;
+        signed := 2 * signed - 1;
+    };
+
+    public func print() : async () {
+        Prim.debugPrint(debug_show (unsigned));
+        Prim.debugPrint(debug_show (signed));
+    };
+};
+
+//CALL ingress print "DIDL\x00\x00"
+//CALL ingress modify "DIDL\x00\x00"
+//CALL ingress __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+//CALL upgrade ""
+//CALL ingress __motoko_destabilize_after_upgrade "DIDL\x00\x00"
+//CALL ingress print "DIDL\x00\x00"
+//CALL ingress modify "DIDL\x00\x00"
+//CALL ingress __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+//CALL upgrade ""
+//CALL ingress __motoko_destabilize_after_upgrade "DIDL\x00\x00"
+//CALL ingress print "DIDL\x00\x00"
+//CALL upgrade ""
+//CALL ingress print "DIDL\x00\x00"
+//CALL ingress __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+//CALL upgrade ""
+//CALL ingress __motoko_destabilize_after_upgrade "DIDL\x00\x00"
+//CALL ingress print "DIDL\x00\x00"
diff --git a/test/run-drun/stabilize-blob-iter.drun b/test/run-drun/stabilize-blob-iter.drun
new file mode 100644
index 00000000000..91a66b3d918
--- /dev/null
+++ b/test/run-drun/stabilize-blob-iter.drun
@@ -0,0 +1,16 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+# SKIP ic-ref-run
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+install $ID stabilize-blob-iter/version0.mo ""
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-blob-iter/version0.mo ""
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-blob-iter/version1.mo ""
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-blob-iter/version1.mo ""
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-blob-iter/version0.mo ""
diff --git a/test/run-drun/stabilize-blob-iter/version0.mo b/test/run-drun/stabilize-blob-iter/version0.mo
new file mode 100644
index 00000000000..68bcb70a02c
--- /dev/null
+++ b/test/run-drun/stabilize-blob-iter/version0.mo
@@ -0,0 +1,27 @@
+import Prim "mo:prim";
+
+actor {
+  let temporary = 1;
+
+  let blobiter = ("hello" : Blob).vals();
+
+  stable let value : {
+    stableField : Text;
+  } = {
+    stableField = "Version 0";
+    nonStableField = blobiter;
+    unreachableField = -123;
+  };
+
+  stable let any : Any = blobiter;
+  stable let tuple : (Int, Any) = (0, blobiter);
+  stable let variant : { #tag : Any } = #tag blobiter;
+  stable let record : { lab : Any } = { lab = blobiter };
+  stable let vector : [Any] = [blobiter];
+  stable let array : [var Any] = [var blobiter];
+  stable let opt : ?Any = ?blobiter;
+
+  public func print() : async () {
+    Prim.debugPrint(debug_show (value));
+  };
+};
diff --git a/test/run-drun/stabilize-blob-iter/version1.mo b/test/run-drun/stabilize-blob-iter/version1.mo
new file mode 100644
index 00000000000..38770680751
--- /dev/null
+++ b/test/run-drun/stabilize-blob-iter/version1.mo
@@ -0,0 +1,19 @@
+import Prim "mo:prim";
+
+actor {
+  stable let value : {} = {
+    stableField = "Version 1";
+  };
+
+  stable let any : Any = null;
+  stable let tuple : (Int, Any) = (0, null);
+  stable let variant : { #tag : Any } = #tag null;
+  stable let record : { lab : Any } = { lab = null };
+  stable let vector : [Any] = [null];
+  stable let array : [var Any] = [var null];
+  stable let opt : ?Any = null;
+
+  public func print() : async () {
+    Prim.debugPrint(debug_show (value));
+  };
+};
diff --git a/test/run-drun/stabilize-boxed-numbers.mo b/test/run-drun/stabilize-boxed-numbers.mo
new file mode 100644
index 00000000000..b0e6790563f
--- /dev/null
+++ b/test/run-drun/stabilize-boxed-numbers.mo
@@ -0,0 +1,26 @@
+//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+import Prim "mo:prim";
+
+actor {
+  stable let boxedNat32 : Nat32 = 4294967295;
+  stable let boxedInt32 : Int32 = -2147483648;
+  stable let boxedNat64 : Nat64 = 18446744073709551614;
+  stable let boxedInt64 : Int64 = -9223372036854775808;
+
+  public func print() : async () {
+    Prim.debugPrint(debug_show (boxedNat32));
+    Prim.debugPrint(debug_show (boxedInt32));
+    Prim.debugPrint(debug_show (boxedNat64));
+    Prim.debugPrint(debug_show (boxedInt64));
+  };
+};
+
+//CALL ingress print "DIDL\x00\x00"
+//CALL ingress __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+//CALL upgrade ""
+//CALL ingress print "DIDL\x00\x00"
+//CALL ingress __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+//CALL upgrade ""
+//CALL ingress print "DIDL\x00\x00"
+//CALL upgrade ""
+//CALL ingress print "DIDL\x00\x00"
diff --git a/test/run-drun/stabilize-generics.drun b/test/run-drun/stabilize-generics.drun
new file mode 100644
index 00000000000..f148ea4dff8
--- /dev/null
+++ b/test/run-drun/stabilize-generics.drun
@@ -0,0 +1,22 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+# SKIP ic-ref-run
+install $ID stabilize-generics/version0.mo ""
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-generics/version0.mo ""
+ingress $ID print "DIDL\x00\x01\x7d\x0F"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-generics/version1.mo ""
+ingress $ID print "DIDL\x00\x01\x7d\x0F"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-generics/version1.mo ""
+ingress $ID print "DIDL\x00\x01\x7d\x0F"
+upgrade $ID stabilize-generics/version1.mo ""
+ingress $ID print "DIDL\x00\x01\x7d\x0F"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-generics/version2.mo ""
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-generics/version0.mo ""
+ingress $ID print "DIDL\x00\x01\x7d\x0F"
+upgrade $ID stabilize-generics/version0.mo ""
+ingress $ID print "DIDL\x00\x01\x7d\x0F"
diff --git a/test/run-drun/stabilize-generics/version0.mo b/test/run-drun/stabilize-generics/version0.mo
new file mode 100644
index 00000000000..b64c5ca0fe0
--- /dev/null
+++ b/test/run-drun/stabilize-generics/version0.mo
@@ -0,0 +1,15 @@
+import Prim "mo:prim";
+
+actor {
+
+  type B = [T];
+  type A = (B, B);
+  module X = { public type A = U };
+  type T = (A, X.A);
+
+  stable var value = (([0], [1]), true) : T;
+
+  public func print() : async () {
+    Prim.debugPrint(debug_show (value));
+  };
+};
diff --git a/test/run-drun/stabilize-generics/version1.mo b/test/run-drun/stabilize-generics/version1.mo
new file mode 100644
index 00000000000..ef501febe2d
--- /dev/null
+++ b/test/run-drun/stabilize-generics/version1.mo
@@ -0,0 +1,15 @@
+import Prim "mo:prim";
+
+// Compatible change
+actor {
+
+  type B = [T0];
+  type A = (B, B);
+  type T = (A, Bool);
+
+  stable var value = (([0], [1]), true) : T;
+
+  public func print() : async () {
+    Prim.debugPrint(debug_show (value));
+  };
+};
diff --git a/test/run-drun/stabilize-generics/version2.mo b/test/run-drun/stabilize-generics/version2.mo
new file mode 100644
index 00000000000..7199827d23c
--- /dev/null
+++ b/test/run-drun/stabilize-generics/version2.mo
@@ -0,0 +1,15 @@
+import Prim "mo:prim";
+
+// Incompatible change
+actor {
+
+  type B = [T0];
+  type A = (B, B);
+  type T = (A, Bool);
+
+  stable var value = (([0], [1]), true) : T;
+
+  public func print() : async () {
+    Prim.debugPrint(debug_show (value));
+  };
+};
diff --git a/test/run-drun/stabilize-large-array.mo b/test/run-drun/stabilize-large-array.mo
new file mode 100644
index 00000000000..8a1d50bee0c
--- /dev/null
+++ b/test/run-drun/stabilize-large-array.mo
@@ -0,0 +1,35 @@
+//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+//MOC-FLAG --stabilization-instruction-limit=10000
+
+import Prim "mo:prim";
+
+actor {
+    stable var largeArray = Prim.Array_tabulate(100_000, func(index) { index });
+
+    public func check() : async () {
+        Prim.debugPrint("Array of length " # debug_show (largeArray.size()));
+        var index = 0;
+        while (index < largeArray.size()) {
+            assert (largeArray[index] == index);
+            index += 1;
+        };
+    };
+
+    system func preupgrade() {
+        Prim.debugPrint("PRE-UPGRADE HOOK!");
+    };
+
+    system func postupgrade() {
+        Prim.debugPrint("POST-UPGRADE HOOK!");
+    };
+};
+
+//CALL ingress check "DIDL\x00\x00"
+//CALL ingress __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+//CALL upgrade ""
+//CALL ingress __motoko_destabilize_after_upgrade "DIDL\x00\x00"
+//CALL ingress check "DIDL\x00\x00"
+
+//SKIP run
+//SKIP run-ir
+//SKIP run-low
diff --git a/test/run-drun/stabilize-mutable-array.drun b/test/run-drun/stabilize-mutable-array.drun
new file mode 100644
index 00000000000..8eda948b7c7
--- /dev/null
+++ b/test/run-drun/stabilize-mutable-array.drun
@@ -0,0 +1,7 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+# SKIP ic-ref-run
+install $ID stabilize-mutable-array/version0.mo ""
+ingress $ID test "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-mutable-array/version1.mo ""
+ingress $ID test "DIDL\x00\x00"
diff --git a/test/run-drun/stabilize-mutable-array/version0.mo b/test/run-drun/stabilize-mutable-array/version0.mo
new file mode 100644
index 00000000000..52f1b329629
--- /dev/null
+++ b/test/run-drun/stabilize-mutable-array/version0.mo
@@ -0,0 +1,22 @@
+import Prim "mo:prim";
+
+actor {
+    stable let instance = [
+        var {
+            firstField = 0;
+            secondField = 0;
+            thirdField = 0;
+        }
+    ];
+
+    stable let alias = instance;
+
+    public func test() : async () {
+        Prim.debugPrint("firstField=" # debug_show (instance[0].firstField));
+        Prim.debugPrint("secondField=" # debug_show (instance[0].secondField));
+        Prim.debugPrint("thirdField=" # debug_show (instance[0].thirdField));
+        Prim.debugPrint("firstField=" # debug_show (alias[0].firstField));
+        Prim.debugPrint("secondField=" # debug_show (alias[0].secondField));
+        Prim.debugPrint("thirdField=" # debug_show (alias[0].thirdField));
+    };
+};
diff --git a/test/run-drun/stabilize-mutable-array/version1.mo b/test/run-drun/stabilize-mutable-array/version1.mo
new file mode 100644
index 00000000000..6d134e47568
--- /dev/null
+++ b/test/run-drun/stabilize-mutable-array/version1.mo
@@ -0,0 +1,28 @@
+import Prim "mo:prim";
+
+actor {
+    stable let instance = [
+        var {
+            secondField = 1;
+        }
+    ];
+
+    stable let alias = [
+        var {
+            firstField = 1;
+            secondField = 1;
+            thirdField = 1;
+        }
+    ];
+
+    public func test() : async () {
+        // This would break type safety if memory compatibility check does not prevent upgrade.
+        instance[0] := { secondField = 2 };
+        // alias[0] would no longer have the firstField and thirdField.
+
+        Prim.debugPrint("secondField=" # debug_show (instance[0].secondField));
+        Prim.debugPrint("firstField=" # debug_show (alias[0].firstField));
+        Prim.debugPrint("secondField=" # debug_show (alias[0].secondField));
+        Prim.debugPrint("thirdField=" # debug_show (alias[0].thirdField));
+    };
+};
diff --git a/test/run-drun/stabilize-mutable-service.drun b/test/run-drun/stabilize-mutable-service.drun
new file mode 100644
index 00000000000..6100e272cc9
--- /dev/null
+++ b/test/run-drun/stabilize-mutable-service.drun
@@ -0,0 +1,12 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+# SKIP ic-ref-run
+install $ID stabilize-mutable-service/version0.mo ""
+ingress $ID initialize "DIDL\x00\x00"
+ingress $ID test "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-mutable-service/version0.mo ""
+ingress $ID test "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-mutable-service/version1.mo ""
+ingress $ID initialize "DIDL\x00\x01\x7d\x0F"
+ingress $ID test "DIDL\x00\x00"
diff --git a/test/run-drun/stabilize-mutable-service/original-service.mo b/test/run-drun/stabilize-mutable-service/original-service.mo
new file mode 100644
index 00000000000..5cfefeb2ef3
--- /dev/null
+++ b/test/run-drun/stabilize-mutable-service/original-service.mo
@@ -0,0 +1,19 @@
+import Prim "mo:prim";
+
+actor class OriginalActor() {
+    public func test0() : async () {
+        Prim.debugPrint("original test0");
+    };
+
+    public func test1() : async () {
+        Prim.debugPrint("original test1");
+    };
+
+    public func test2() : async () {
+        Prim.debugPrint("original test2");
+    };
+    
+    public func test3() : async () {
+        Prim.debugPrint("original test3");
+    };
+};
diff --git a/test/run-drun/stabilize-mutable-service/reduced-service.mo b/test/run-drun/stabilize-mutable-service/reduced-service.mo
new file mode 100644
index 00000000000..b274c09b3ae
--- /dev/null
+++ b/test/run-drun/stabilize-mutable-service/reduced-service.mo
@@ -0,0 +1,11 @@
+import Prim "mo:prim";
+
+actor class ReducedActor() {
+    public func test1() : async () {
+        Prim.debugPrint("reduced test1");
+    };
+
+    public func test3() : async () {
+        Prim.debugPrint("reduced test3");
+    };
+};
diff --git a/test/run-drun/stabilize-mutable-service/version0.mo b/test/run-drun/stabilize-mutable-service/version0.mo
new file mode 100644
index 00000000000..eef8f85d250
--- /dev/null
+++ b/test/run-drun/stabilize-mutable-service/version0.mo
@@ -0,0 +1,42 @@
+import Prim "mo:prim";
+import Cycles = "../cycles/cycles";
+import OriginalService "original-service";
+
+actor this {
+    type OriginalActor = actor {
+        test0() : async ();
+        test1() : async ();
+        test2() : async ();
+        test3() : async ();
+    };
+
+    stable var instance = [var null : ?OriginalActor];
+    stable var alias = instance;
+
+    public func initialize() : async () {
+        await Cycles.provisional_top_up_actor(this, 100_000_000_000_000);
+        Cycles.add(2_000_000_000_000);
+        instance[0] := ?(await OriginalService.OriginalActor());
+    };
+
+    public func test() : async () {
+        switch (instance[0]) {
+            case (?instance) {
+                await instance.test0();
+                await instance.test1();
+                await instance.test2();
+                await instance.test3();
+            };
+            case null Prim.trap("Null");
+        };
+        switch (alias[0]) {
+            case (?alias) {
+                await alias.test0();
+                await alias.test1();
+                await alias.test2();
+                await alias.test3();
+            };
+            case null Prim.trap("Null");
+        };
+    };
+};
diff --git a/test/run-drun/stabilize-mutable-service/version1.mo b/test/run-drun/stabilize-mutable-service/version1.mo
new file mode 100644
index 00000000000..17f3f968008
--- /dev/null
+++ b/test/run-drun/stabilize-mutable-service/version1.mo
@@ -0,0 +1,44 @@
+import Prim "mo:prim";
+import Cycles = "../cycles/cycles";
+import ReducedService "reduced-service";
+
+actor this {
+    type OriginalActor = actor {
+        test0() : async ();
+        test1() : async ();
+        test2() : async ();
+        test3() : async ();
+    };
+
+    type ReducedActor = actor {
+        test1() : async ();
+        test3() : async ();
+    };
+
+    stable var instance = [var null : ?ReducedActor];
+    stable var alias = [var null : ?OriginalActor];
+
+    public func initialize() : async () {
+        Cycles.add(2_000_000_000_000);
+        instance[0] := ?(await ReducedService.ReducedActor());
+    };
+
+    public func test() : async () {
+        switch (instance[0]) {
+            case (?instance) {
+                await instance.test1();
+                await instance.test3();
+            };
+            case null Prim.trap("Null");
+        };
+        switch (alias[0]) {
+            case (?alias) {
+                await alias.test0();
+                await alias.test1();
+                await alias.test2();
+                await alias.test3();
+            };
+            case null Prim.trap("Null");
+        };
+    };
+};
diff --git a/test/run-drun/stabilize-mutable-variant.drun b/test/run-drun/stabilize-mutable-variant.drun
new file mode 100644
index 00000000000..af30b520348
--- /dev/null
+++ b/test/run-drun/stabilize-mutable-variant.drun
@@ -0,0 +1,7 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+# SKIP ic-ref-run
+install $ID stabilize-mutable-variant/version0.mo ""
+ingress $ID test "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-mutable-variant/version1.mo ""
+ingress $ID test "DIDL\x00\x00"
diff --git a/test/run-drun/stabilize-mutable-variant/version0.mo b/test/run-drun/stabilize-mutable-variant/version0.mo
new file mode 100644
index 00000000000..00a90693ae9
--- /dev/null
+++ b/test/run-drun/stabilize-mutable-variant/version0.mo
@@ -0,0 +1,16 @@
+import Prim "mo:prim";
+
+actor {
+    type OriginalVariant = {
+        #Option1;
+        #Option2;
+    };
+
+    stable let instance = [ var #Option1: OriginalVariant ];
+    stable let alias = instance;
+
+    public func test() : async () {
+        Prim.debugPrint("instance=" # debug_show (instance));
+        Prim.debugPrint("alias=" # debug_show (alias));
+    };
+};
diff --git a/test/run-drun/stabilize-mutable-variant/version1.mo b/test/run-drun/stabilize-mutable-variant/version1.mo
new file mode 100644
index 00000000000..241a83ab89b
--- /dev/null
+++ b/test/run-drun/stabilize-mutable-variant/version1.mo
@@ -0,0 +1,26 @@
+import Prim "mo:prim";
+
+actor {
+    type OriginalVariant = {
+        #Option1;
+        #Option2;
+    };
+
+    type ExtendedVariant = {
+        #Option0;
+        #Option1;
+        #Option2;
+    };
+
+    stable let instance = [ var #Option1: OriginalVariant ];
+    stable let alias = [ var #Option1: ExtendedVariant ];
+
+    public func test() : async () {
+        // Would break type safety if not rejected by the memory compatibility check of the upgrade.
+        alias[0] := #Option0;
+        // instance would observe invalid enum value.
+
+        Prim.debugPrint("instance=" # debug_show (instance));
+        Prim.debugPrint("alias=" # debug_show (alias));
+    };
+};
diff --git a/test/run-drun/stabilize-mutable.drun b/test/run-drun/stabilize-mutable.drun
new file mode 100644
index 00000000000..567ba7662db
--- /dev/null
+++ b/test/run-drun/stabilize-mutable.drun
@@ -0,0 +1,14 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+# SKIP ic-ref-run
+install $ID stabilize-mutable/version0.mo ""
+ingress $ID modify "DIDL\x00\x00"
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-mutable/version0.mo ""
+ingress $ID modify "DIDL\x00\x00"
+ingress $ID print "DIDL\x00\x00"
+upgrade $ID stabilize-mutable/version0.mo ""
+ingress $ID modify "DIDL\x00\x00"
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-mutable/version1.mo ""
diff --git a/test/run-drun/stabilize-mutable/version0.mo b/test/run-drun/stabilize-mutable/version0.mo
new file mode 100644
index 00000000000..058768fbd04
--- /dev/null
+++ b/test/run-drun/stabilize-mutable/version0.mo
@@ -0,0 +1,18 @@
+import Prim "mo:prim";
+
+actor {
+   type Entry = { key: Nat; value: Text; };
+   stable var array1: [var Entry] = [ var { key = 1; value = "1" }];
+   stable var array2: [var Entry] = array1;
+   
+   public func modify() : async () {
+      array1[0] := { key = 2; value = "2" };
+      assert(array2[0].key == 2);
+      assert(array2[0].value == "2");
+   };
+
+   public func print() : async () {
+      Prim.debugPrint(debug_show(array1));
+      Prim.debugPrint(debug_show(array2));
+   };
+};
diff --git a/test/run-drun/stabilize-mutable/version1.mo b/test/run-drun/stabilize-mutable/version1.mo
new file mode 100644
index 00000000000..be9424f3d7e
--- /dev/null
+++ b/test/run-drun/stabilize-mutable/version1.mo
@@ -0,0 +1,20 @@
+import Prim "mo:prim";
+
+actor {
+   type Entry = { key : Nat; value : Text };
+   type SubEntry = { key : Nat; };
+   
+   stable var array1 : [var Entry] = [var { key = 1; value = "1" }];
+   stable var array2 : [var SubEntry] = [var];
+
+   public func modify() : async () {
+      array2[0] := { key = 3; };
+      assert (array1[0].key == 3);
+      Prim.debugPrint(array1[0].value);
+   };
+
+   public func print() : async () {
+      Prim.debugPrint(debug_show (array1));
+      Prim.debugPrint(debug_show (array2));
+   };
+};
diff --git a/test/run-drun/stabilize-nat-to-int.drun b/test/run-drun/stabilize-nat-to-int.drun
new file mode 100644
index 00000000000..50468c57cc6
--- /dev/null
+++ b/test/run-drun/stabilize-nat-to-int.drun
@@ -0,0 +1,20 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+# SKIP ic-ref-run
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+install $ID stabilize-nat-to-int/version0.mo ""
+ingress $ID modify "DIDL\x00\x00"
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-nat-to-int/version0.mo ""
+ingress $ID modify "DIDL\x00\x00"
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-nat-to-int/version1.mo ""
+ingress $ID modify "DIDL\x00\x00"
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-nat-to-int/version1.mo ""
+ingress $ID modify "DIDL\x00\x00"
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-nat-to-int/version0.mo ""
diff --git a/test/run-drun/stabilize-nat-to-int/version0.mo b/test/run-drun/stabilize-nat-to-int/version0.mo
new file mode 100644
index 00000000000..53f13ea1874
--- /dev/null
+++ b/test/run-drun/stabilize-nat-to-int/version0.mo
@@ -0,0 +1,13 @@
+import Prim "mo:prim";
+
+actor {
+   stable var number : Nat = 0;
+
+   public func modify() : async () {
+      number += 1;
+   };
+
+   public func print() : async () {
+      Prim.debugPrint(debug_show (number));
+   };
+};
diff --git a/test/run-drun/stabilize-nat-to-int/version1.mo b/test/run-drun/stabilize-nat-to-int/version1.mo
new file mode 100644
index 00000000000..0c20ef697c4
--- /dev/null
+++ b/test/run-drun/stabilize-nat-to-int/version1.mo
@@ -0,0 +1,13 @@
+import Prim "mo:prim";
+
+actor {
+   stable var number : Int = -123;
+
+   public func modify() : async () {
+      number -= 2;
+   };
+
+   public func print() : async () {
+      Prim.debugPrint(debug_show (number));
+   };
+};
diff --git a/test/run-drun/stabilize-non-stable.drun b/test/run-drun/stabilize-non-stable.drun
new file mode 100644
index 00000000000..56140eb8e0c
--- /dev/null
+++ b/test/run-drun/stabilize-non-stable.drun
@@ -0,0 +1,16 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+# SKIP ic-ref-run
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+install $ID stabilize-non-stable/version0.mo ""
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-non-stable/version0.mo ""
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-non-stable/version1.mo ""
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-non-stable/version1.mo ""
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-non-stable/version0.mo ""
diff --git a/test/run-drun/stabilize-non-stable/version0.mo b/test/run-drun/stabilize-non-stable/version0.mo
new file mode 100644
index 00000000000..87a49e87f0c
--- /dev/null
+++ b/test/run-drun/stabilize-non-stable/version0.mo
@@ -0,0 +1,29 @@
+import Prim "mo:prim";
+
+actor {
+  let temporary = 1;
+
+  func f() {
+    Prim.debugPrint(debug_show (temporary));
+  };
+
+  stable let value : {
+    stableField : Text;
+  } = {
+    stableField = "Version 0";
+    nonStableField = f;
+    unreachableField = -123;
+  };
+
+  stable let any : Any = f;
+  stable let tuple : (Int, Any) = (0, f);
+  stable let variant : { #tag : Any } = #tag f;
+  stable let record : { lab : Any } = { lab = f };
+  stable let vector : [Any] = [f];
+  stable let array : [var Any] = [var f];
+  stable let opt : ?Any = ?f;
+
+  public func print() : async () {
+    Prim.debugPrint(debug_show (value));
+  };
+};
diff --git a/test/run-drun/stabilize-non-stable/version1.mo b/test/run-drun/stabilize-non-stable/version1.mo
new file mode 100644
index 00000000000..38770680751
--- /dev/null
+++ b/test/run-drun/stabilize-non-stable/version1.mo
@@ -0,0 +1,19 @@
+import Prim "mo:prim";
+
+actor {
+  stable let value : {} = {
+    stableField = "Version 1";
+  };
+
+  stable let any : Any = null;
+  stable let tuple : (Int, Any) = (0, null);
+  stable let variant : { #tag : Any } = #tag null;
+  stable let record : { lab : Any } = { lab = null };
+  stable let vector : [Any] = [null];
+  stable let array : [var Any] = [var null];
+  stable let opt : ?Any = null;
+
+  public func print() : async () {
+    Prim.debugPrint(debug_show (value));
+  };
+};
diff --git a/test/run-drun/stabilize-optional.drun b/test/run-drun/stabilize-optional.drun
new file mode 100644
index 00000000000..0da7c2d55df
--- /dev/null
+++ b/test/run-drun/stabilize-optional.drun
@@ -0,0 +1,21 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+# SKIP ic-ref-run
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+install $ID stabilize-optional/version0.mo ""
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-optional/version0.mo ""
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-optional/version1.mo ""
+upgrade $ID stabilize-optional/version2.mo ""
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-optional/version2.mo ""
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-optional/version2.mo ""
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-optional/version2.mo ""
+upgrade $ID stabilize-optional/version2.mo ""
\ No newline at end of file
diff --git a/test/run-drun/stabilize-optional/version0.mo b/test/run-drun/stabilize-optional/version0.mo
new file mode 100644
index 00000000000..51958e571ab
--- /dev/null
+++ b/test/run-drun/stabilize-optional/version0.mo
@@ -0,0 +1,9 @@
+import Prim "mo:prim";
+
+actor {
+  stable let value: ???{} = ??null;
+
+  public func print() : async () {
+    Prim.debugPrint(debug_show (value));
+  };
+};
diff --git a/test/run-drun/stabilize-optional/version1.mo b/test/run-drun/stabilize-optional/version1.mo
new file mode 100644
index 00000000000..880646e7ede
--- /dev/null
+++ b/test/run-drun/stabilize-optional/version1.mo
@@ -0,0 +1,9 @@
+import Prim "mo:prim";
+
+actor {
+  stable let value: ???{} = ?null;
+
+  public func print() : async () {
+    Prim.debugPrint(debug_show (value));
+  };
+};
diff --git a/test/run-drun/stabilize-optional/version2.mo b/test/run-drun/stabilize-optional/version2.mo
new file mode 100644
index 00000000000..d0437c6ad8b
--- /dev/null
+++ b/test/run-drun/stabilize-optional/version2.mo
@@ -0,0 +1,9 @@
+import Prim "mo:prim";
+
+actor {
+  stable let value: ???{} = null;
+
+  public func print() : async () {
+    Prim.debugPrint(debug_show (value));
+  };
+};
diff --git a/test/run-drun/stabilize-primitive-types.mo b/test/run-drun/stabilize-primitive-types.mo
new file mode 100644
index 00000000000..c0277c8a47c
--- /dev/null
+++ b/test/run-drun/stabilize-primitive-types.mo
@@ -0,0 +1,73 @@
+//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+import Prim "mo:prim";
+
+actor {
+    stable var nullVariable = null;
+    stable var boolVariable = false;
+    stable var natVariable = 1234567890123456789012345678901234567890;
+    stable var nat8Variable = 123: Nat8;
+    stable var nat16Variable = 12345: Nat16;
+    stable var nat32Variable = 1234567890: Nat32;
+    stable var nat64Variable = 123456789012345: Nat64;
+    stable var intVariable = -1234567890123456789012345678901234567890;
+    stable var int8Variable = -123: Int8;
+    stable var int16Variable = -12345: Int16;
+    stable var int32Variable = -1234567890: Int32;
+    stable var int64Variable = -123456789012345: Int64;
+    stable var floatVariable = 1.234567890;
+    stable var charVariable = '!';
+    stable var textVariable = "\"TEST\"";
+    stable var blobVariable = "abc": Blob;
+    stable var principalVariable = Prim.principalOfBlob ("\00\00\00\00\00\30\00\D3\01\01": Blob);
+
+    Prim.debugPrint("Initialized");
+
+    public func increase() : async () {
+        nullVariable := null;
+        boolVariable := not boolVariable;
+        natVariable += 1;
+        nat8Variable += 1;
+        nat16Variable += 1;
+        nat32Variable += 1;
+        nat64Variable += 1;
+        floatVariable += 0.1;
+        charVariable := if (charVariable == '!') { '?' } else { '!' };
+        textVariable #= " \"TEST\"";
+    };
+
+    public func print() : async () {
+        Prim.debugPrint(debug_show(nullVariable));
+        Prim.debugPrint(debug_show(boolVariable));
+        Prim.debugPrint(debug_show(natVariable));
+        Prim.debugPrint(debug_show(nat8Variable));
+        Prim.debugPrint(debug_show(nat16Variable));
+        Prim.debugPrint(debug_show(nat32Variable));
+        Prim.debugPrint(debug_show(nat64Variable));
+        Prim.debugPrint(debug_show(intVariable));
+        Prim.debugPrint(debug_show(int8Variable));
+        Prim.debugPrint(debug_show(int16Variable));
+        Prim.debugPrint(debug_show(int32Variable));
+        Prim.debugPrint(debug_show(int64Variable));
+        Prim.debugPrint(debug_show(floatVariable));
+        Prim.debugPrint(debug_show(charVariable));
+        Prim.debugPrint(debug_show(textVariable));
+        Prim.debugPrint(debug_show(blobVariable));
+        Prim.debugPrint(debug_show(principalVariable));
+    };
+}
+
+//SKIP run
+//SKIP run-low
+//SKIP run-ir
+//SKIP comp-ref
+//CALL ingress print "DIDL\x00\x00"
+//CALL ingress __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+//CALL upgrade ""
+//CALL ingress increase "DIDL\x00\x00"
+//CALL ingress print "DIDL\x00\x00"
+//CALL ingress __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+//CALL upgrade ""
+//CALL ingress increase "DIDL\x00\x00"
+//CALL ingress print "DIDL\x00\x00"
+//CALL upgrade ""
+//CALL ingress print "DIDL\x00\x00"
diff --git a/test/run-drun/stabilize-recursive-type.drun b/test/run-drun/stabilize-recursive-type.drun
new file mode 100644
index 00000000000..e6658c62742
--- /dev/null
+++ b/test/run-drun/stabilize-recursive-type.drun
@@ -0,0 +1,17 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+# SKIP ic-ref-run
+install $ID stabilize-recursive-type/version0.mo ""
+ingress $ID increase "DIDL\x00\x01\x7d\x0F"
+ingress $ID check "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x01\x7d\x0F"
+upgrade $ID stabilize-recursive-type/version0.mo ""
+ingress $ID increase "DIDL\x00\x01\x7d\x0F"
+ingress $ID check "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x01\x7d\x0F"
+upgrade $ID stabilize-recursive-type/version1.mo ""
+ingress $ID increase "DIDL\x00\x01\x7d\x0F"
+ingress $ID check "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x01\x7d\x0F"
+upgrade $ID stabilize-recursive-type/version1.mo ""
+ingress $ID increase "DIDL\x00\x01\x7d\x0F"
+ingress $ID check "DIDL\x00\x00"
diff --git a/test/run-drun/stabilize-recursive-type/version0.mo b/test/run-drun/stabilize-recursive-type/version0.mo
new file mode 100644
index 00000000000..84d95f3aa18
--- /dev/null
+++ b/test/run-drun/stabilize-recursive-type/version0.mo
@@ -0,0 +1,31 @@
+import Prim "mo:prim";
+
+actor {
+    type Node = {
+        var value : Nat;
+        var next : ?Node;
+        var test : Nat;
+    };
+
+    let node = { var value = 0; var next = null : ?Node; var test = 0 };
+    node.next := ?node;
+
+    stable var root = node;
+
+    public func increase() : async () {
+        root.value += 1;
+        root.test := root.value;
+    };
+
+    public func check() : async () {
+        Prim.debugPrint("CHECK " # debug_show (root.value));
+        assert (root.value == root.test);
+        switch (root.next) {
+            case null Prim.trap("");
+            case (?next) {
+                assert (next.value == root.value);
+                assert (next.value == root.test);
+            };
+        };
+    };
+};
diff --git a/test/run-drun/stabilize-recursive-type/version1.mo b/test/run-drun/stabilize-recursive-type/version1.mo
new file mode 100644
index 00000000000..a840ea723d8
--- /dev/null
+++ b/test/run-drun/stabilize-recursive-type/version1.mo
@@ -0,0 +1,25 @@
+import Prim "mo:prim";
+
+// Incompatible change (mutable invariance).
+actor {
+    type Node = {
+        var value : Nat;
+        var next : ?Node;
+    };
+
+    stable var root = { var value = 0; var next = null : ?Node };
+
+    public func increase() : async () {
+        root.value += 1;
+    };
+
+    public func check() : async () {
+        Prim.debugPrint("CHECK " # debug_show (root.value));
+        switch (root.next) {
+            case null Prim.trap("");
+            case (?next) {
+                assert (next.value == root.value);
+            };
+        };
+    };
+};
diff --git a/test/run-drun/stabilize-remove-add-fields.drun b/test/run-drun/stabilize-remove-add-fields.drun
new file mode 100644
index 00000000000..a72e38b3aba
--- /dev/null
+++ b/test/run-drun/stabilize-remove-add-fields.drun
@@ -0,0 +1,25 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+# SKIP ic-ref-run
+install $ID stabilize-remove-add-fields/version0.mo ""
+ingress $ID increase "DIDL\x00\x01\x7d\x0F"
+ingress $ID show "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x01\x7d\x0F"
+upgrade $ID stabilize-remove-add-fields/version0.mo ""
+ingress $ID increase "DIDL\x00\x01\x7d\x0F"
+ingress $ID show "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x01\x7d\x0F"
+upgrade $ID stabilize-remove-add-fields/version1.mo ""
+ingress $ID increase "DIDL\x00\x01\x7d\x0F"
+ingress $ID show "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x01\x7d\x0F"
+upgrade $ID stabilize-remove-add-fields/version1.mo ""
+ingress $ID increase "DIDL\x00\x01\x7d\x0F"
+ingress $ID show "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x01\x7d\x0F"
+upgrade $ID stabilize-remove-add-fields/version2.mo ""
+ingress $ID increase "DIDL\x00\x01\x7d\x0F"
+ingress $ID show "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x01\x7d\x0F"
+upgrade $ID stabilize-remove-add-fields/version2.mo ""
+ingress $ID increase "DIDL\x00\x01\x7d\x0F"
+ingress $ID show "DIDL\x00\x00"
diff --git a/test/run-drun/stabilize-remove-add-fields/version0.mo b/test/run-drun/stabilize-remove-add-fields/version0.mo
new file mode 100644
index 00000000000..cc7330c63bd
--- /dev/null
+++ b/test/run-drun/stabilize-remove-add-fields/version0.mo
@@ -0,0 +1,21 @@
+import Prim "mo:prim";
+
+actor {
+    stable var instance = {
+        var firstField = 0;
+        var secondField = 0;
+        var thirdField = 0;
+    };
+
+    public func increase() : async () {
+        instance.firstField += 1;
+        instance.secondField += 1;
+        instance.thirdField += 1;
+    };
+
+    public func show() : async () {
+        Prim.debugPrint("firstField=" # debug_show (instance.firstField));
+        Prim.debugPrint("secondField=" # debug_show (instance.secondField));
+        Prim.debugPrint("thirdField=" # debug_show (instance.thirdField));
+    };
+};
diff --git a/test/run-drun/stabilize-remove-add-fields/version1.mo b/test/run-drun/stabilize-remove-add-fields/version1.mo
new file mode 100644
index 00000000000..6a21d341207
--- /dev/null
+++ b/test/run-drun/stabilize-remove-add-fields/version1.mo
@@ -0,0 +1,15 @@
+import Prim "mo:prim";
+
+actor {
+    stable var instance = {
+        var secondField = 0;
+    };
+
+    public func increase() : async () {
+        instance.secondField += 1;
+    };
+
+    public func show() : async () {
+        Prim.debugPrint("secondField=" # debug_show (instance.secondField));
+    };
+};
diff --git a/test/run-drun/stabilize-remove-add-fields/version2.mo b/test/run-drun/stabilize-remove-add-fields/version2.mo
new file mode 100644
index 00000000000..fe72a1fdc7f
--- /dev/null
+++ b/test/run-drun/stabilize-remove-add-fields/version2.mo
@@ -0,0 +1,18 @@
+import Prim "mo:prim";
+
+actor {
+    stable var instance = {
+        var secondField = 0;
+        var newField = 0;
+    };
+
+    public func increase() : async () {
+        instance.secondField += 1;
+        instance.newField += 1;
+    };
+
+    public func show() : async () {
+        Prim.debugPrint("secondField=" # debug_show (instance.secondField));
+        Prim.debugPrint("newField=" # debug_show (instance.newField));
+    };
+};
diff --git a/test/run-drun/stabilize-service.drun b/test/run-drun/stabilize-service.drun
new file mode 100644
index 00000000000..47db80d5e82
--- /dev/null
+++ b/test/run-drun/stabilize-service.drun
@@ -0,0 +1,18 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+# SKIP ic-ref-run
+install $ID stabilize-service/version0.mo ""
+ingress $ID initialize "DIDL\x00\x00"
+ingress $ID test "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-service/version0.mo ""
+ingress $ID test "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-service/version1.mo ""
+ingress $ID initialize "DIDL\x00\x01\x7d\x0F"
+ingress $ID test "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-service/version1.mo ""
+ingress $ID test "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-service/version0.mo ""
+ingress $ID test "DIDL\x00\x00"
diff --git a/test/run-drun/stabilize-service/original-service.mo b/test/run-drun/stabilize-service/original-service.mo
new file mode 100644
index 00000000000..a1f18a67e07
--- /dev/null
+++ b/test/run-drun/stabilize-service/original-service.mo
@@ -0,0 +1,11 @@
+import Prim "mo:prim";
+
+actor class OriginalActor() {
+    public func test1() : async () {
+        Prim.debugPrint("original test1");
+    };
+
+    public func test2() : async () {
+        Prim.debugPrint("original test2");
+    };
+};
diff --git a/test/run-drun/stabilize-service/reduced-service.mo b/test/run-drun/stabilize-service/reduced-service.mo
new file mode 100644
index 00000000000..400a36c5373
--- /dev/null
+++ b/test/run-drun/stabilize-service/reduced-service.mo
@@ -0,0 +1,7 @@
+import Prim "mo:prim";
+
+actor class ReducedActor() {
+    public func test1() : async () {
+        Prim.debugPrint("reduced test1");
+    };
+};
diff --git a/test/run-drun/stabilize-service/version0.mo b/test/run-drun/stabilize-service/version0.mo
new file mode 100644
index 00000000000..b20a14d49bb
--- /dev/null
+++ b/test/run-drun/stabilize-service/version0.mo
@@ -0,0 +1,28 @@
+import Prim "mo:prim";
+import Cycles = "../cycles/cycles";
+import OriginalService "original-service";
+
+actor this {
+    type OriginalActor = actor {
+        test1() : async ();
+        test2() : async ();
+    };
+
+    stable var instance : ?OriginalActor = null;
+
+    public func initialize() : async () {
+        await Cycles.provisional_top_up_actor(this, 100_000_000_000_000);
+        Cycles.add(2_000_000_000_000);
+        instance := ?(await OriginalService.OriginalActor());
+    };
+
+    public func test() : async () {
+        switch instance {
+            case (?instance) {
+                await instance.test1();
+                await instance.test2();
+            };
+            case null Prim.trap("Null");
+        };
+    };
+};
diff --git a/test/run-drun/stabilize-service/version1.mo b/test/run-drun/stabilize-service/version1.mo
new file mode 100644
index 00000000000..a48bc474128
--- /dev/null
+++ b/test/run-drun/stabilize-service/version1.mo
@@ -0,0 +1,30 @@
+import Prim "mo:prim";
+import Cycles = "../cycles/cycles";
+import ReducedService "reduced-service";
+
+actor this {
+    type OriginalActor = actor {
+        test1() : async ();
+        test2() : async ();
+    };
+
+    type ReducedActor = actor {
+        test1() : async ();
+    };
+
+    stable var instance : ?ReducedActor = null;
+
+    public func initialize() : async () {
+        Cycles.add(2_000_000_000_000);
+        instance := ?(await ReducedService.ReducedActor());
+    };
+
+    public func test() : async () {
+        switch instance {
+            case (?instance) {
+                await instance.test1();
+            };
+            case null Prim.trap("Null");
+        };
+    };
+};
diff --git a/test/run-drun/stabilize-text-iter.drun b/test/run-drun/stabilize-text-iter.drun
new file mode 100644
index 00000000000..ed94b6f96df
--- /dev/null
+++ b/test/run-drun/stabilize-text-iter.drun
@@ -0,0 +1,16 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+# SKIP ic-ref-run
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+install $ID stabilize-text-iter/version0.mo ""
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-text-iter/version0.mo ""
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-text-iter/version1.mo ""
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-text-iter/version1.mo ""
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-text-iter/version0.mo ""
diff --git a/test/run-drun/stabilize-text-iter/version0.mo b/test/run-drun/stabilize-text-iter/version0.mo
new file mode 100644
index 00000000000..3cb3dabcbe5
--- /dev/null
+++ b/test/run-drun/stabilize-text-iter/version0.mo
@@ -0,0 +1,27 @@
+import Prim "mo:prim";
+
+actor {
+  let temporary = 1;
+
+  let textiter = "hello".chars();
+
+  stable let value : {
+    stableField : Text;
+  } = {
+    stableField = "Version 0";
+    nonStableField = textiter;
+    unreachableField = -123;
+  };
+
+  stable let any : Any = textiter;
+  stable let tuple : (Int, Any) = (0, textiter);
+  stable let variant : { #tag : Any } = #tag textiter;
+  stable let record : { lab : Any } = { lab = textiter };
+  stable let vector : [Any] = [textiter];
+  stable let array : [var Any] = [var textiter];
+  stable let opt : ?Any = ?textiter;
+
+  public func print() : async () {
+    Prim.debugPrint(debug_show (value));
+  };
+};
diff --git a/test/run-drun/stabilize-text-iter/version1.mo b/test/run-drun/stabilize-text-iter/version1.mo
new file mode 100644
index 00000000000..38770680751
--- /dev/null
+++ b/test/run-drun/stabilize-text-iter/version1.mo
@@ -0,0 +1,19 @@
+import Prim "mo:prim";
+
+actor {
+  stable let value : {} = {
+    stableField = "Version 1";
+  };
+
+  stable let any : Any = null;
+  stable let tuple : (Int, Any) = (0, null);
+  stable let variant : { #tag : Any } = #tag null;
+  stable let record : { lab : Any } = { lab = null };
+  stable let vector : [Any] = [null];
+  stable let array : [var Any] = [var null];
+  stable let opt : ?Any = null;
+
+  public func print() : async () {
+    Prim.debugPrint(debug_show (value));
+  };
+};
diff --git a/test/run-drun/stabilize-to-any.drun b/test/run-drun/stabilize-to-any.drun
new file mode 100644
index 00000000000..e06d4771999
--- /dev/null
+++ b/test/run-drun/stabilize-to-any.drun
@@ -0,0 +1,15 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+# SKIP ic-ref-run
+install $ID stabilize-to-any/version0.mo ""
+ingress $ID check "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-to-any/version1.mo ""
+ingress $ID __motoko_gc_trigger "DIDL\x00\x00"
+ingress $ID check "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-to-any/version2.mo ""
+ingress $ID __motoko_gc_trigger "DIDL\x00\x00"
+ingress $ID check "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID stabilize-to-any/version2.mo ""
+ingress $ID check "DIDL\x00\x00"
diff --git a/test/run-drun/stabilize-to-any/version0.mo b/test/run-drun/stabilize-to-any/version0.mo
new file mode 100644
index 00000000000..5763645a096
--- /dev/null
+++ b/test/run-drun/stabilize-to-any/version0.mo
@@ -0,0 +1,23 @@
+import Prim "mo:prim";
+
+actor {
+   Prim.debugPrint("Version 0");
+
+   let allocationSize = 64 * 1024 * 1024;
+
+   func largeAllocation(name: Text): [var Nat] {
+      Prim.debugPrint("Initialize " # name);
+      Prim.Array_init(allocationSize / 8, 0);
+   };
+
+   stable var firstVariable : [var Nat] = largeAllocation("first variable");
+
+   public func check(): async() {
+      // Extra GC increments.
+      await async {};
+      await async {};
+      await async {};
+      // Check that first variable has been cleared and the first array has been reclaimed.
+      assert(Prim.rts_heap_size() >= allocationSize);
+   };
+};
diff --git a/test/run-drun/stabilize-to-any/version1.mo b/test/run-drun/stabilize-to-any/version1.mo
new file mode 100644
index 00000000000..7eb6eb1d543
--- /dev/null
+++ b/test/run-drun/stabilize-to-any/version1.mo
@@ -0,0 +1,25 @@
+import Prim "mo:prim";
+
+actor {
+   Prim.debugPrint("Version 1");
+
+   let allocationSize = 64 * 1024 * 1024;
+
+   func largeAllocation(name: Text): [var Nat] {
+      Prim.debugPrint("Initialize " # name);
+      Prim.Array_init(allocationSize / 8, 0);
+   };
+
+   stable var firstVariable : Any = largeAllocation("first variable");
+   stable var secondVariable : Any = largeAllocation("second variable");
+   
+   public func check(): async() {
+      // Extra GC increments.
+      await async {};
+      await async {};
+      await async {};
+      // Check that first variable has been cleared and the first array has been reclaimed.
+      assert(Prim.rts_heap_size() >= allocationSize);
+      assert(Prim.rts_heap_size() < 2 * allocationSize);
+   };
+};
diff --git a/test/run-drun/stabilize-to-any/version2.mo b/test/run-drun/stabilize-to-any/version2.mo
new file mode 100644
index 00000000000..4fe5c28804e
--- /dev/null
+++ b/test/run-drun/stabilize-to-any/version2.mo
@@ -0,0 +1,23 @@
+import Prim "mo:prim";
+
+actor {
+   Prim.debugPrint("Version 2");
+
+   let allocationSize = 64 * 1024 * 1024;
+
+   func largeAllocation(name: Text): [var Nat] {
+      Prim.trap("Should not be called");
+   };
+
+   stable var firstVariable : Any = largeAllocation("first variable");
+   stable var secondVariable : Any = largeAllocation("second variable");
+
+   public func check(): async() {
+      // Extra GC increments.
+      await async {};
+      await async {};
+      await async {};
+      // Check that both variables have been cleared and both arrays have been reclaimed.
+      assert(Prim.rts_heap_size() < allocationSize);
+   }
+};
diff --git a/test/run-drun/stabilize-variant.drun b/test/run-drun/stabilize-variant.drun
new file mode 100644
index 00000000000..b572cc59e33
--- /dev/null
+++ b/test/run-drun/stabilize-variant.drun
@@ -0,0 +1,19 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+# SKIP ic-ref-run
+install $ID stabilize-variant/version0.mo ""
+ingress $ID modify "DIDL\x00\x01\x7d\x0F"
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x01\x7d\x0F"
+upgrade $ID stabilize-variant/version0.mo ""
+ingress $ID modify "DIDL\x00\x01\x7d\x0F"
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x01\x7d\x0F"
+upgrade $ID stabilize-variant/version1.mo ""
+ingress $ID modify "DIDL\x00\x01\x7d\x0F"
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x01\x7d\x0F"
+upgrade $ID stabilize-variant/version1.mo ""
+ingress $ID modify "DIDL\x00\x01\x7d\x0F"
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x01\x7d\x0F"
+upgrade $ID stabilize-variant/version2.mo ""
diff --git a/test/run-drun/stabilize-variant/version0.mo b/test/run-drun/stabilize-variant/version0.mo
new file mode 100644
index 00000000000..212799e10c5
--- /dev/null
+++ b/test/run-drun/stabilize-variant/version0.mo
@@ -0,0 +1,21 @@
+import Prim "mo:prim";
+
+actor {
+  type Variant = {
+    #one : Nat;
+    #two : { key : Nat; var name : Text };
+  };
+
+  stable var root : Variant = #two { key = 1; var name = "TEST" };
+
+  public func modify() : async () {
+    switch root {
+      case (#two record) record.name #= " TEST";
+      case _ assert false;
+    };
+  };
+
+  public func print() : async () {
+    Prim.debugPrint(debug_show (root));
+  };
+};
diff --git a/test/run-drun/stabilize-variant/version1.mo b/test/run-drun/stabilize-variant/version1.mo
new file mode 100644
index 00000000000..49843a2510a
--- /dev/null
+++ b/test/run-drun/stabilize-variant/version1.mo
@@ -0,0 +1,23 @@
+import Prim "mo:prim";
+
+// Compatible upgrade
+actor {
+  type Variant = {
+    #two : { key : Nat; var name : Text };
+    #one : Nat;
+    #three : Text;
+  };
+
+  stable var root : Variant = #three "ERROR";
+
+  public func modify() : async () {
+    switch root {
+      case (#two record) record.name #= " TEST";
+      case _ assert false;
+    };
+  };
+
+  public func print() : async () {
+    Prim.debugPrint(debug_show (root));
+  };
+};
diff --git a/test/run-drun/stabilize-variant/version2.mo b/test/run-drun/stabilize-variant/version2.mo
new file mode 100644
index 00000000000..0911e8508a5
--- /dev/null
+++ b/test/run-drun/stabilize-variant/version2.mo
@@ -0,0 +1,22 @@
+import Prim "mo:prim";
+
+// Incompatible upgrade
+actor {
+  type Variant = {
+    #two : { key : Nat; var name : Text };
+    #one : Nat;
+  };
+
+  stable var root: Variant = #one 0;
+
+  public func modify() : async () {
+    switch root {
+      case (#two record) record.name #= " TEST";
+      case _ assert false;
+    };
+  };
+
+  public func print() : async () {
+    Prim.debugPrint(debug_show(root));
+  };
+};
diff --git a/test/run-drun/stable-mem-rts-stats.mo b/test/run-drun/stable-mem-rts-stats.mo
index 5d8da6368c7..8e784eb7576 100644
--- a/test/run-drun/stable-mem-rts-stats.mo
+++ b/test/run-drun/stable-mem-rts-stats.mo
@@ -3,7 +3,9 @@ import StableMemory "stable-mem/StableMemory";
 actor {
   let s1 = P.rts_stable_memory_size();
   let l1 = P.rts_logical_stable_memory_size();
-  P.debugPrint (debug_show({s1;l1}));
+  assert (s1 == 0 or s1 == 17 or s1 == 18);
+  assert (l1 == 0 or l1 == 16);
+  P.debugPrint ("Ignore Diff:" # debug_show({s1;l1}));
   let 0 = StableMemory.grow(16);
   stable var v = StableMemory.loadBlob(0, 65536);
   let s2 = P.rts_stable_memory_size();
diff --git a/test/run-drun/stable-regions-new-each-stabilization.mo b/test/run-drun/stable-regions-new-each-stabilization.mo
new file mode 100644
index 00000000000..595a8d279c2
--- /dev/null
+++ b/test/run-drun/stable-regions-new-each-stabilization.mo
@@ -0,0 +1,46 @@
+//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+//MOC-FLAG --stable-regions
+//MOC-ENV MOC_UNLOCK_PRIM=yesplease
+
+import P "mo:⛔";
+import Region "stable-region/Region";
+
+actor {
+  stable var n = 0;
+  stable let regions : [var ?Region] = [var null, null, null];
+  
+  system func preupgrade() {
+    P.debugPrint("upgrading... calling Region.new(), n=" # debug_show n);
+    regions[n] := ?Region.new();
+    n += 1;
+  };
+  func unwrap(i : Nat, ro : ?Region) : Region {
+      P.debugPrint(" unwrapping. i=" # debug_show i);      
+      switch ro {
+      case null { assert false; loop { } };
+      case (?r) r;
+      }
+  };
+  public func sanityTest() {
+    P.debugPrint("sanity check. n=" # debug_show n);
+    if (n > 0) { P.debugPrint(debug_show Region.id(unwrap(0, regions[0]))) };
+    if (n > 1) { P.debugPrint(debug_show Region.id(unwrap(1, regions[1]))) };
+    if (n > 2) { P.debugPrint(debug_show Region.id(unwrap(2, regions[2]))) };
+  };
+}
+
+//SKIP run
+//SKIP run-low
+//SKIP run-ir
+// too slow on ic-ref-run:
+//SKIP comp-ref
+//CALL ingress sanityTest "DIDL\x00\x00"
+//CALL ingress __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+//CALL upgrade ""
+//CALL ingress sanityTest "DIDL\x00\x00"
+//CALL ingress __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+//CALL upgrade ""
+//CALL ingress sanityTest "DIDL\x00\x00"
+//CALL ingress __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+//CALL upgrade ""
+//CALL ingress sanityTest "DIDL\x00\x00"
diff --git a/test/run-drun/stable-regions-stabilization.mo b/test/run-drun/stable-regions-stabilization.mo
new file mode 100644
index 00000000000..77f3beb2432
--- /dev/null
+++ b/test/run-drun/stable-regions-stabilization.mo
@@ -0,0 +1,52 @@
+//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+//MOC-FLAG --stable-regions
+//MOC-ENV MOC_UNLOCK_PRIM=yesplease
+
+import P "mo:⛔";
+import Region "stable-region/Region";
+import StableMemory "stable-mem/StableMemory";
+
+actor {
+  stable var n = 0;
+  stable var r1 = Region.new();
+  stable var r2 = Region.new();
+
+  let block_size_in_pages = 128 : Nat64;
+
+  P.debugPrint "grow three big regions (including region0).";
+  // Interleave growing regions by a block each:
+  do {
+    ignore StableMemory.grow(block_size_in_pages);
+    ignore Region.grow(r1, block_size_in_pages);
+    ignore Region.grow(r2, block_size_in_pages);
+
+    ignore StableMemory.grow(block_size_in_pages);
+    ignore Region.grow(r1, block_size_in_pages);
+    ignore Region.grow(r2, block_size_in_pages);
+
+    ignore StableMemory.grow(block_size_in_pages);
+    ignore Region.grow(r1, block_size_in_pages);
+    ignore Region.grow(r2, block_size_in_pages);
+  };
+  P.debugPrint "grow three big regions: done.";
+
+  system func preupgrade() {
+    P.debugPrint("upgrading... n=" # debug_show n);
+    n += 1;
+  };
+  public func sanityTest() {
+    P.debugPrint("sanity check. n=" # debug_show n);
+    assert Region.id(r1) == 16;
+    assert Region.id(r2) == 17;
+  };
+}
+
+//SKIP run
+//SKIP run-low
+//SKIP run-ir
+// too slow on ic-ref-run:
+//SKIP comp-ref
+//CALL ingress sanityTest "DIDL\x00\x00"
+//CALL ingress __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+//CALL upgrade ""
+//CALL ingress sanityTest "DIDL\x00\x00"
diff --git a/test/run-drun/stable-size-no-overflow.mo b/test/run-drun/stable-size-no-overflow.mo
index 7c697fef11d..bad01118990 100644
--- a/test/run-drun/stable-size-no-overflow.mo
+++ b/test/run-drun/stable-size-no-overflow.mo
@@ -1,3 +1,4 @@
+//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
 import P "mo:⛔";
 import SM "stable-mem/StableMemory";
 
diff --git a/test/run-drun/stable-size-overflow.mo b/test/run-drun/stable-size-overflow.mo
new file mode 100644
index 00000000000..a544987089c
--- /dev/null
+++ b/test/run-drun/stable-size-overflow.mo
@@ -0,0 +1,25 @@
+//CLASSICAL-PERSISTENCE-ONLY
+import P "mo:⛔";
+import SM "stable-mem/StableMemory";
+
+actor {
+
+  ignore SM.grow(1);
+
+  let page : Blob = SM.loadBlob(0,65536);
+  assert (page.size() == 65536);
+
+  stable
+  let _a : [Blob] = P.Array_tabulate(65536,func _ { page });
+
+  system func preupgrade() {
+   P.debugPrint("upgrading...");
+  };
+}
+
+//SKIP run
+//SKIP run-low
+//SKIP run-ir
+
+//CALL upgrade ""
+
diff --git a/test/run-drun/tuple-record-mismatch.drun b/test/run-drun/tuple-record-mismatch.drun
index 729eea35083..125a6d03d89 100644
--- a/test/run-drun/tuple-record-mismatch.drun
+++ b/test/run-drun/tuple-record-mismatch.drun
@@ -1,3 +1,4 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
 install $ID tuple-record-mismatch/version0.mo ""
 upgrade $ID tuple-record-mismatch/version0.mo ""
 upgrade $ID tuple-record-mismatch/version1.mo ""
diff --git a/test/run-drun/tuple-stabilization.drun b/test/run-drun/tuple-stabilization.drun
new file mode 100644
index 00000000000..6f13b2cf801
--- /dev/null
+++ b/test/run-drun/tuple-stabilization.drun
@@ -0,0 +1,27 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+# SKIP ic-ref-run
+install $ID tuple-stabilization/version0.mo ""
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID tuple-stabilization/version0.mo ""
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID tuple-stabilization/version1.mo ""
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID tuple-stabilization/version1.mo ""
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID tuple-stabilization/version2.mo ""
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID tuple-stabilization/version3.mo ""
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID tuple-stabilization/version4.mo ""
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID tuple-stabilization/version5.mo ""
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID tuple-stabilization/version0.mo ""
+ingress $ID print "DIDL\x00\x00"
+ingress $ID __motoko_stabilize_before_upgrade "DIDL\x00\x00"
+upgrade $ID tuple-stabilization/version0.mo ""
+ingress $ID print "DIDL\x00\x00"
diff --git a/test/run-drun/tuple-stabilization/version0.mo b/test/run-drun/tuple-stabilization/version0.mo
new file mode 100644
index 00000000000..695f6230a75
--- /dev/null
+++ b/test/run-drun/tuple-stabilization/version0.mo
@@ -0,0 +1,15 @@
+import Prim "mo:prim";
+
+actor {
+    type TreeType = ?(Nat, TreeType, TreeType);
+
+    stable var pair = (1, 2);
+    stable var largerTuple = (0, "Test", 1.23, { key = 5; value = '_' }, [-1, 2, -3]);
+    stable var tree: TreeType = ?(2, ?(1, null: TreeType, null: TreeType), ?(4, ?(3, null: TreeType, null: TreeType), ?(5, null: TreeType, null: TreeType)));
+
+    public func print() : async () {
+        Prim.debugPrint(debug_show (pair));
+        Prim.debugPrint(debug_show (largerTuple));
+        Prim.debugPrint(debug_show (tree));
+    };
+};
diff --git a/test/run-drun/tuple-stabilization/version1.mo b/test/run-drun/tuple-stabilization/version1.mo
new file mode 100644
index 00000000000..dac99f913d6
--- /dev/null
+++ b/test/run-drun/tuple-stabilization/version1.mo
@@ -0,0 +1,18 @@
+import Prim "mo:prim";
+
+// Compatible upgrade
+actor {
+    type TreeType = ?(Nat, TreeType, TreeType);
+
+    stable var unit = ();
+    stable var pair = (1, 2);
+    stable var largerTuple = (0, "Test", 1.23, { key = 5; value = '_' }, [-1, 2, -3]);
+    stable var tree: TreeType = ?(2, ?(1, null: TreeType, null: TreeType), ?(4, ?(3, null: TreeType, null: TreeType), ?(5, null: TreeType, null: TreeType)));
+
+    public func print() : async () {
+        Prim.debugPrint(debug_show (unit));
+        Prim.debugPrint(debug_show (pair));
+        Prim.debugPrint(debug_show (largerTuple));
+        Prim.debugPrint(debug_show (tree));
+    };
+};
diff --git a/test/run-drun/tuple-stabilization/version2.mo b/test/run-drun/tuple-stabilization/version2.mo
new file mode 100644
index 00000000000..21501f4bd7a
--- /dev/null
+++ b/test/run-drun/tuple-stabilization/version2.mo
@@ -0,0 +1,10 @@
+import Prim "mo:prim";
+
+// Incompatible upgrade
+actor {
+    stable var unit = (1);
+    
+    public func print() : async () {
+        Prim.debugPrint(debug_show (unit));
+    };
+};
diff --git a/test/run-drun/tuple-stabilization/version3.mo b/test/run-drun/tuple-stabilization/version3.mo
new file mode 100644
index 00000000000..f9a1cebc680
--- /dev/null
+++ b/test/run-drun/tuple-stabilization/version3.mo
@@ -0,0 +1,10 @@
+import Prim "mo:prim";
+
+// Incompatible upgrade
+actor {
+    stable var pair = (1);
+    
+    public func print() : async () {
+        Prim.debugPrint(debug_show (pair));
+    };
+};
diff --git a/test/run-drun/tuple-stabilization/version4.mo b/test/run-drun/tuple-stabilization/version4.mo
new file mode 100644
index 00000000000..06deb846566
--- /dev/null
+++ b/test/run-drun/tuple-stabilization/version4.mo
@@ -0,0 +1,10 @@
+import Prim "mo:prim";
+
+// Incompatible upgrade
+actor {
+    stable var largerTuple = ("Test", 0, 1.23, { key = 5; value = '_' }, [-1, 2, -3]);
+
+    public func print() : async () {
+        Prim.debugPrint(debug_show (largerTuple));
+    };
+};
diff --git a/test/run-drun/tuple-stabilization/version5.mo b/test/run-drun/tuple-stabilization/version5.mo
new file mode 100644
index 00000000000..50ed4bc5e9b
--- /dev/null
+++ b/test/run-drun/tuple-stabilization/version5.mo
@@ -0,0 +1,16 @@
+import Prim "mo:prim";
+
+// Incompatible upgrade
+actor {
+    type TreeType = ?(Nat, TreeType, Null);
+
+    stable var pair = (1, 2);
+    stable var largerTuple = (0, "Test", 1.23, { key = 5; value = '_' }, [-1, 2, -3]);
+    stable var tree: TreeType = ?(2, ?(1, null: TreeType, null), null);
+
+    public func print() : async () {
+        Prim.debugPrint(debug_show (pair));
+        Prim.debugPrint(debug_show (largerTuple));
+        Prim.debugPrint(debug_show (tree));
+    };
+};
diff --git a/test/run-drun/tuple-upgrades.drun b/test/run-drun/tuple-upgrades.drun
index 6591e7ccda3..010b1fcadaf 100644
--- a/test/run-drun/tuple-upgrades.drun
+++ b/test/run-drun/tuple-upgrades.drun
@@ -1,3 +1,4 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
 # SKIP ic-ref-run
 install $ID tuple-upgrades/version0.mo ""
 ingress $ID print "DIDL\x00\x00"
diff --git a/test/run-drun/upgrade-bignat-bigint.drun b/test/run-drun/upgrade-bignat-bigint.drun
index 941ef0cf16c..eafa49c6c13 100644
--- a/test/run-drun/upgrade-bignat-bigint.drun
+++ b/test/run-drun/upgrade-bignat-bigint.drun
@@ -1,3 +1,4 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
 # SKIP ic-ref-run
 install $ID upgrade-bignat-bigint/version0.mo ""
 ingress $ID modify "DIDL\x00\x00"
diff --git a/test/run-drun/upgrade-bignums.mo b/test/run-drun/upgrade-bignums.mo
new file mode 100644
index 00000000000..baa33696edb
--- /dev/null
+++ b/test/run-drun/upgrade-bignums.mo
@@ -0,0 +1,24 @@
+import Prim "mo:prim";
+
+actor {
+    stable var unsigned : Nat = 12345678901234567890123456789012345678901234567890123456789012345678901234567890;
+    stable var signed : Int = -12345678901234567890123456789012345678901234567890123456789012345678901234567890;
+
+    public func modify() : async () {
+        unsigned := 2 * unsigned + 1;
+        signed := 2 * signed - 1;
+    };
+
+    public func print() : async () {
+        Prim.debugPrint(debug_show (unsigned));
+        Prim.debugPrint(debug_show (signed));
+    };
+};
+
+//CALL ingress print "DIDL\x00\x00"
+//CALL ingress modify "DIDL\x00\x00"
+//CALL upgrade ""
+//CALL ingress print "DIDL\x00\x00"
+//CALL ingress modify "DIDL\x00\x00"
+//CALL upgrade ""
+//CALL ingress print "DIDL\x00\x00"
diff --git a/test/run-drun/upgrade-generics.drun b/test/run-drun/upgrade-generics.drun
index a74b6226b64..f2227fc85b6 100644
--- a/test/run-drun/upgrade-generics.drun
+++ b/test/run-drun/upgrade-generics.drun
@@ -1,3 +1,4 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
 # SKIP ic-ref-run
 install $ID upgrade-generics/version0.mo ""
 ingress $ID print "DIDL\x00\x00"
diff --git a/test/run-drun/upgrade-instructions.mo b/test/run-drun/upgrade-instructions.mo
index 38489a31a36..0649918d857 100644
--- a/test/run-drun/upgrade-instructions.mo
+++ b/test/run-drun/upgrade-instructions.mo
@@ -1,3 +1,4 @@
+//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
 import Prim "mo:prim";
 
 actor {
diff --git a/test/run-drun/upgrade-mutable-array.drun b/test/run-drun/upgrade-mutable-array.drun
index 6249be67613..55ca16e235d 100644
--- a/test/run-drun/upgrade-mutable-array.drun
+++ b/test/run-drun/upgrade-mutable-array.drun
@@ -1,3 +1,4 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
 # SKIP ic-ref-run
 install $ID upgrade-mutable-array/version0.mo ""
 ingress $ID test "DIDL\x00\x00"
diff --git a/test/run-drun/upgrade-mutable-service.drun b/test/run-drun/upgrade-mutable-service.drun
index 21a1a6c172e..bbae78a28e3 100644
--- a/test/run-drun/upgrade-mutable-service.drun
+++ b/test/run-drun/upgrade-mutable-service.drun
@@ -1,3 +1,4 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
 # SKIP ic-ref-run
 install $ID upgrade-mutable-service/version0.mo ""
 ingress $ID initialize "DIDL\x00\x00"
diff --git a/test/run-drun/upgrade-mutable-variant.drun b/test/run-drun/upgrade-mutable-variant.drun
index 76601ff8da5..347fcc0b91e 100644
--- a/test/run-drun/upgrade-mutable-variant.drun
+++ b/test/run-drun/upgrade-mutable-variant.drun
@@ -1,3 +1,4 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
 # SKIP ic-ref-run
 install $ID upgrade-mutable-variant/version0.mo ""
 ingress $ID test "DIDL\x00\x00"
diff --git a/test/run-drun/upgrade-mutable.drun b/test/run-drun/upgrade-mutable.drun
index f66bffb052d..9053937824b 100644
--- a/test/run-drun/upgrade-mutable.drun
+++ b/test/run-drun/upgrade-mutable.drun
@@ -1,3 +1,4 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
 # SKIP ic-ref-run
 install $ID upgrade-mutable/version0.mo ""
 ingress $ID modify "DIDL\x00\x00"
diff --git a/test/run-drun/upgrade-nat-to-int.drun b/test/run-drun/upgrade-nat-to-int.drun
index 916b550f9da..498b6bfe685 100644
--- a/test/run-drun/upgrade-nat-to-int.drun
+++ b/test/run-drun/upgrade-nat-to-int.drun
@@ -1,3 +1,4 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
 # SKIP ic-ref-run
 install $ID upgrade-nat-to-int/version0.mo ""
 ingress $ID modify "DIDL\x00\x00"
diff --git a/test/run-drun/upgrade-non-stable.drun b/test/run-drun/upgrade-non-stable.drun
index cd320e88413..1462d50c47a 100644
--- a/test/run-drun/upgrade-non-stable.drun
+++ b/test/run-drun/upgrade-non-stable.drun
@@ -1,3 +1,4 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
 # SKIP ic-ref-run
 install $ID upgrade-non-stable/version0.mo ""
 ingress $ID print "DIDL\x00\x00"
diff --git a/test/run-drun/upgrade-non-stable/version0.mo b/test/run-drun/upgrade-non-stable/version0.mo
index 7dd6de3c5e3..87a49e87f0c 100644
--- a/test/run-drun/upgrade-non-stable/version0.mo
+++ b/test/run-drun/upgrade-non-stable/version0.mo
@@ -3,16 +3,26 @@ import Prim "mo:prim";
 actor {
   let temporary = 1;
 
+  func f() {
+    Prim.debugPrint(debug_show (temporary));
+  };
+
   stable let value : {
     stableField : Text;
   } = {
     stableField = "Version 0";
-    nonStableField = func() {
-      Prim.debugPrint(debug_show (temporary));
-    };
+    nonStableField = f;
     unreachableField = -123;
   };
 
+  stable let any : Any = f;
+  stable let tuple : (Int, Any) = (0, f);
+  stable let variant : { #tag : Any } = #tag f;
+  stable let record : { lab : Any } = { lab = f };
+  stable let vector : [Any] = [f];
+  stable let array : [var Any] = [var f];
+  stable let opt : ?Any = ?f;
+
   public func print() : async () {
     Prim.debugPrint(debug_show (value));
   };
diff --git a/test/run-drun/upgrade-non-stable/version1.mo b/test/run-drun/upgrade-non-stable/version1.mo
index 93f6116c0a1..38770680751 100644
--- a/test/run-drun/upgrade-non-stable/version1.mo
+++ b/test/run-drun/upgrade-non-stable/version1.mo
@@ -1,11 +1,18 @@
 import Prim "mo:prim";
 
 actor {
-  stable let value : {
-  } = {
+  stable let value : {} = {
     stableField = "Version 1";
   };
 
+  stable let any : Any = null;
+  stable let tuple : (Int, Any) = (0, null);
+  stable let variant : { #tag : Any } = #tag null;
+  stable let record : { lab : Any } = { lab = null };
+  stable let vector : [Any] = [null];
+  stable let array : [var Any] = [var null];
+  stable let opt : ?Any = null;
+
   public func print() : async () {
     Prim.debugPrint(debug_show (value));
   };
diff --git a/test/run-drun/upgrade-recursive-type.drun b/test/run-drun/upgrade-recursive-type.drun
index b7260ae98bc..f655e9fb9eb 100644
--- a/test/run-drun/upgrade-recursive-type.drun
+++ b/test/run-drun/upgrade-recursive-type.drun
@@ -1,3 +1,4 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
 # SKIP ic-ref-run
 install $ID upgrade-recursive-type/version0.mo ""
 ingress $ID increase "DIDL\x00\x01\x7d\x0F"
diff --git a/test/run-drun/upgrade-remove-add-fields.drun b/test/run-drun/upgrade-remove-add-fields.drun
index c9fa228534a..9b2b4c4a47d 100644
--- a/test/run-drun/upgrade-remove-add-fields.drun
+++ b/test/run-drun/upgrade-remove-add-fields.drun
@@ -1,3 +1,4 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
 # SKIP ic-ref-run
 install $ID upgrade-remove-add-fields/version0.mo ""
 ingress $ID increase "DIDL\x00\x01\x7d\x0F"
diff --git a/test/run-drun/upgrade-service.drun b/test/run-drun/upgrade-service.drun
index 5879eacc822..7a279360aaa 100644
--- a/test/run-drun/upgrade-service.drun
+++ b/test/run-drun/upgrade-service.drun
@@ -1,3 +1,4 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
 # SKIP ic-ref-run
 install $ID upgrade-service/version0.mo ""
 ingress $ID initialize "DIDL\x00\x00"
diff --git a/test/run-drun/upgrade-to-any.drun b/test/run-drun/upgrade-to-any.drun
index ea5756edae1..693fc5709d0 100644
--- a/test/run-drun/upgrade-to-any.drun
+++ b/test/run-drun/upgrade-to-any.drun
@@ -1,3 +1,4 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
 # SKIP ic-ref-run
 install $ID upgrade-to-any/version0.mo ""
 ingress $ID check "DIDL\x00\x00"
diff --git a/test/run-drun/upgrade-to-any/version0.mo b/test/run-drun/upgrade-to-any/version0.mo
index 2d2d80d943a..5763645a096 100644
--- a/test/run-drun/upgrade-to-any/version0.mo
+++ b/test/run-drun/upgrade-to-any/version0.mo
@@ -3,11 +3,11 @@ import Prim "mo:prim";
 actor {
    Prim.debugPrint("Version 0");
 
-   let allocationSize = 40_000_000;
+   let allocationSize = 64 * 1024 * 1024;
 
    func largeAllocation(name: Text): [var Nat] {
       Prim.debugPrint("Initialize " # name);
-      Prim.Array_init(allocationSize / 4, 0);
+      Prim.Array_init(allocationSize / 8, 0);
    };
 
    stable var firstVariable : [var Nat] = largeAllocation("first variable");
@@ -16,6 +16,7 @@ actor {
       // Extra GC increments.
       await async {};
       await async {};
+      await async {};
       // Check that first variable has been cleared and the first array has been reclaimed.
       assert(Prim.rts_heap_size() >= allocationSize);
    };
diff --git a/test/run-drun/upgrade-to-any/version1.mo b/test/run-drun/upgrade-to-any/version1.mo
index bf0f251ca60..6532ded262b 100644
--- a/test/run-drun/upgrade-to-any/version1.mo
+++ b/test/run-drun/upgrade-to-any/version1.mo
@@ -3,11 +3,11 @@ import Prim "mo:prim";
 actor {
    Prim.debugPrint("Version 1");
 
-   let allocationSize = 40_000_000;
+   let allocationSize = 64 * 1024 * 1024;
 
    func largeAllocation(name: Text): [var Nat] {
       Prim.debugPrint("Initialize " # name);
-      Prim.Array_init(allocationSize / 4, 0);
+      Prim.Array_init(allocationSize / 8, 0);
    };
 
    stable var firstVariable : Any = largeAllocation("first variable");
@@ -17,6 +17,7 @@ actor {
       // Extra GC increments.
       await async {};
       await async {};
+      await async {};
       // Check that first variable has been cleared and the first array has been reclaimed.
       assert(Prim.rts_heap_size() >= allocationSize);
       assert(Prim.rts_heap_size() < 2 * allocationSize);
diff --git a/test/run-drun/upgrade-to-any/version2.mo b/test/run-drun/upgrade-to-any/version2.mo
index 9051e2fd5cf..4fe5c28804e 100644
--- a/test/run-drun/upgrade-to-any/version2.mo
+++ b/test/run-drun/upgrade-to-any/version2.mo
@@ -3,7 +3,7 @@ import Prim "mo:prim";
 actor {
    Prim.debugPrint("Version 2");
 
-   let allocationSize = 40_000_000;
+   let allocationSize = 64 * 1024 * 1024;
 
    func largeAllocation(name: Text): [var Nat] {
       Prim.trap("Should not be called");
@@ -16,6 +16,7 @@ actor {
       // Extra GC increments.
       await async {};
       await async {};
+      await async {};
       // Check that both variables have been cleared and both arrays have been reclaimed.
       assert(Prim.rts_heap_size() < allocationSize);
    }
diff --git a/test/run-drun/variant-upgrades.drun b/test/run-drun/variant-upgrades.drun
index b27a021eb78..e4239420556 100644
--- a/test/run-drun/variant-upgrades.drun
+++ b/test/run-drun/variant-upgrades.drun
@@ -1,3 +1,4 @@
+# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
 # SKIP ic-ref-run
 install $ID variant-upgrades/version0.mo ""
 ingress $ID modify "DIDL\x00\x01\x7d\x0F"
diff --git a/test/run.sh b/test/run.sh
index 5ef2deae36b..b0713334276 100755
--- a/test/run.sh
+++ b/test/run.sh
@@ -26,7 +26,7 @@ DTESTS=no
 IDL=no
 PERF=no
 VIPER=no
-WASMTIME_OPTIONS="-C cache=n -W nan-canonicalization=y -W multi-memory -W bulk-memory"
+WASMTIME_OPTIONS="-C cache=n -W nan-canonicalization=y -W memory64 -W multi-memory -W bulk-memory"
 WRAP_drun=$(realpath $(dirname $0)/drun-wrapper.sh)
 WRAP_ic_ref_run=$(realpath $(dirname $0)/ic-ref-run-wrapper.sh)
 SKIP_RUNNING=${SKIP_RUNNING:-no}
@@ -227,7 +227,7 @@ then
       HAVE_drun=no
     fi
   fi
-  # ic-wasm does not yet support passive data segments
+  # TODO: Re-enable when ic-wasm supports Wasm64 and passive data segments
   # if ic-wasm --help >& /dev/null
   # then
   #   HAVE_ic_wasm=yes
@@ -306,6 +306,22 @@ do
     else
       TEST_MOC_ARGS=$EXTRA_MOC_ARGS
     fi
+    if grep -q "//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY" $base.mo
+    then
+      if [[ $EXTRA_MOC_ARGS != *"--enhanced-orthogonal-persistence"* ]]
+      then
+        $ECHO " Skipped (not applicable to classical orthogonal persistence)"
+        continue
+      fi
+    fi
+    if grep -q "//CLASSICAL-PERSISTENCE-ONLY" $base.mo
+    then
+      if [[ $EXTRA_MOC_ARGS == *"--enhanced-orthogonal-persistence"* ]]
+      then
+        $ECHO " Skipped (not applicable to enhanced persistence)"
+        continue
+      fi
+    fi
     if [ $VIPER = 'yes' ]
     then
       TEST_MOC_ARGS="$TEST_MOC_ARGS --package base pkg/base"
@@ -408,8 +424,8 @@ do
 
         if [ "$SKIP_VALIDATE" != yes ]
         then
-          run_if wasm valid wasm-validate --enable-multi-memory $out/$base.wasm
-          run_if ref.wasm valid-ref wasm-validate --enable-multi-memory $out/$base.ref.wasm
+          run_if wasm valid wasm-validate --enable-memory64 --enable-multi-memory $out/$base.wasm
+          run_if ref.wasm valid-ref wasm-validate --enable-memory64 --enable-multi-memory $out/$base.ref.wasm
         fi
 
         if [ -e $out/$base.wasm ]
@@ -420,7 +436,7 @@ do
             if grep -F -q CHECK $mangled
             then
               $ECHO -n " [FileCheck]"
-              wasm2wat --enable-multi-memory --no-check $out/$base.wasm > $out/$base.wat
+              wasm2wat --enable-memory64 --enable-multi-memory --no-check $out/$base.wasm > $out/$base.wat
               cat $out/$base.wat | FileCheck $mangled > $out/$base.filecheck 2>&1
               diff_files="$diff_files $base.filecheck"
             fi
@@ -484,7 +500,28 @@ do
       then
         continue
       fi
-
+      if grep -q "# ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY" $(basename $file)
+      then
+        if [[ $EXTRA_MOC_ARGS != *"--enhanced-orthogonal-persistence"* ]]
+        then
+          continue
+        fi
+      fi
+      if grep -q "# CLASSICAL-PERSISTENCE-ONLY" $(basename $file)
+      then
+        if [[ $EXTRA_MOC_ARGS == *"--enhanced-orthogonal-persistence"* ]]
+        then
+          continue
+        fi
+      fi
+      if grep -q "# DEFAULT-GC-ONLY" $(basename $file)
+      then
+        if [[ $EXTRA_MOC_ARGS == *"--copying-gc"* ]] || [[ $EXTRA_MOC_ARGS == *"--compacting-gc"* ]] || [[ $EXTRA_MOC_ARGS == *"--generational-gc"* ]] || [[ $EXTRA_MOC_ARGS == *"--incremental-gc"* ]]
+        then
+          continue
+        fi
+      fi
+      
       have_var_name="HAVE_${runner//-/_}"
       if [ ${!have_var_name} != yes ]
       then
@@ -536,7 +573,7 @@ do
 
     if [ -e $out/$base.linked.wasm ]
     then
-        run wasm2wat wasm2wat $out/$base.linked.wasm -o $out/$base.linked.wat
+        run wasm2wat wasm2wat --enable-memory64 $out/$base.linked.wasm -o $out/$base.linked.wat
         diff_files="$diff_files $base.linked.wat"
     fi
   ;;
diff --git a/test/run/beyond-4GB.mo b/test/run/beyond-4GB.mo
new file mode 100644
index 00000000000..26a927b70d4
--- /dev/null
+++ b/test/run/beyond-4GB.mo
@@ -0,0 +1,23 @@
+//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+// test allocation beyond the 32-bit address space
+import P "mo:⛔";
+do {
+
+  let expectedSize = 5 * 1024 * 1024 * 1024; // 5 GB
+  var c = 5;
+
+  while(c > 0) {
+    ignore P.Array_init(1024*1024*1024/8, 0xFF);
+    c -= 1;
+  };
+
+  
+  assert(P.rts_memory_size() > expectedSize);
+  assert(P.rts_heap_size() > expectedSize);
+}
+
+//SKIP run
+//SKIP run-low
+//SKIP run-ir
+
+
diff --git a/test/run/compact-float.mo b/test/run/compact-float.mo
new file mode 100644
index 00000000000..268195bfa46
--- /dev/null
+++ b/test/run/compact-float.mo
@@ -0,0 +1,21 @@
+import Prim "mo:prim";
+
+let maxCompactFloat32 = 1073741823.0;
+Prim.debugPrint(debug_show (Prim.floatToInt(maxCompactFloat32)));
+Prim.debugPrint(debug_show (Prim.floatToInt(maxCompactFloat32 - 1)));
+Prim.debugPrint(debug_show (Prim.floatToInt(maxCompactFloat32 + 1)));
+
+let minCompactFloat32 = -1073741824.0;
+Prim.debugPrint(debug_show (Prim.floatToInt(minCompactFloat32)));
+Prim.debugPrint(debug_show (Prim.floatToInt(minCompactFloat32 - 1)));
+Prim.debugPrint(debug_show (Prim.floatToInt(minCompactFloat32 + 1)));
+
+let maxCompactFloat64 = 4611686018427387400.0;
+let minNonCompactFloat64 = 4611686018427388000.0;
+Prim.debugPrint(debug_show (Prim.floatToInt(maxCompactFloat64)));
+Prim.debugPrint(debug_show (Prim.floatToInt(minNonCompactFloat64)));
+
+let minCompactFloat64 = -4611686018427387400.0;
+let maxNonCompactFloat64 = -4611686018427388000.0;
+Prim.debugPrint(debug_show (Prim.floatToInt(minCompactFloat64)));
+Prim.debugPrint(debug_show (Prim.floatToInt(maxNonCompactFloat64)));
diff --git a/test/run/empty-if.mo b/test/run/empty-if-classical.mo
similarity index 88%
rename from test/run/empty-if.mo
rename to test/run/empty-if-classical.mo
index e1d22c03bb5..867c15193e6 100644
--- a/test/run/empty-if.mo
+++ b/test/run/empty-if-classical.mo
@@ -1,3 +1,4 @@
+//CLASSICAL-PERSISTENCE-ONLY
 var a = 'N';
 
 func foo() = if (a == 'Y') {} else {};
diff --git a/test/run/empty-if-eop.mo b/test/run/empty-if-eop.mo
new file mode 100644
index 00000000000..05ed3f756e3
--- /dev/null
+++ b/test/run/empty-if-eop.mo
@@ -0,0 +1,17 @@
+//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+var a = 'N';
+
+func foo() = if (a == 'Y') {} else {};
+func barX() = if (a == 'Y') {} else {};
+
+barX();
+foo();
+
+// CHECK: func $foo
+// CHECK: i64.eq
+// CHECK-NEXT: drop
+// CHECK: func $barX
+
+//SKIP run
+//SKIP run-ir
+//SKIP run-low
diff --git a/test/run/float-fmt.mo b/test/run/float-fmt-classical.mo
similarity index 98%
rename from test/run/float-fmt.mo
rename to test/run/float-fmt-classical.mo
index 264e5a1cee8..2c1103babc8 100644
--- a/test/run/float-fmt.mo
+++ b/test/run/float-fmt-classical.mo
@@ -1,3 +1,4 @@
+//CLASSICAL-PERSISTENCE-ONLY
 import Prim "mo:⛔";
 
 let pi = 3.141592653589793238;
diff --git a/test/run/float-fmt-eop.mo b/test/run/float-fmt-eop.mo
new file mode 100644
index 00000000000..e165628a973
--- /dev/null
+++ b/test/run/float-fmt-eop.mo
@@ -0,0 +1,41 @@
+//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+import Prim "mo:⛔";
+
+let pi = 3.141592653589793238;
+
+Prim.debugPrint "fixed Float";
+Prim.debugPrint(Prim.floatToFormattedText(pi, 0, 0));
+Prim.debugPrint(Prim.floatToFormattedText(pi, 1, 0));
+Prim.debugPrint(Prim.floatToFormattedText(pi, 4, 0));
+Prim.debugPrint(Prim.floatToFormattedText(pi, 9, 0));
+Prim.debugPrint(Prim.floatToFormattedText(pi, 16, 0));
+Prim.debugPrint(Prim.floatToFormattedText(pi, 17, 0));
+Prim.debugPrint(Prim.floatToFormattedText(pi, 20, 0));
+
+Prim.debugPrint "exponential Float";
+Prim.debugPrint(Prim.floatToFormattedText(pi, 0, 1));
+Prim.debugPrint(Prim.floatToFormattedText(pi, 1, 1));
+Prim.debugPrint(Prim.floatToFormattedText(pi, 4, 1));
+Prim.debugPrint(Prim.floatToFormattedText(pi, 9, 1));
+Prim.debugPrint(Prim.floatToFormattedText(pi, 16, 1));
+Prim.debugPrint(Prim.floatToFormattedText(pi, 17, 1));
+Prim.debugPrint(Prim.floatToFormattedText(pi, 20, 1));
+
+Prim.debugPrint "generic Float";
+Prim.debugPrint(Prim.floatToFormattedText(pi, 0, 2));
+Prim.debugPrint(Prim.floatToFormattedText(pi, 1, 2));
+Prim.debugPrint(Prim.floatToFormattedText(pi, 4, 2));
+Prim.debugPrint(Prim.floatToFormattedText(pi, 9, 2));
+Prim.debugPrint(Prim.floatToFormattedText(pi, 16, 2));
+Prim.debugPrint(Prim.floatToFormattedText(pi, 17, 2));
+Prim.debugPrint(Prim.floatToFormattedText(pi, 20, 2));
+
+// TODO: Support for 64-bit
+// Prim.debugPrint "hex Float";
+// Prim.debugPrint(Prim.floatToFormattedText(pi, 0, 3));
+// Prim.debugPrint(Prim.floatToFormattedText(pi, 1, 3));
+// Prim.debugPrint(Prim.floatToFormattedText(pi, 4, 3));
+// Prim.debugPrint(Prim.floatToFormattedText(pi, 9, 3));
+// Prim.debugPrint(Prim.floatToFormattedText(pi, 16, 3));
+// Prim.debugPrint(Prim.floatToFormattedText(pi, 17, 3));
+// Prim.debugPrint(Prim.floatToFormattedText(pi, 20, 3));
diff --git a/test/run/float-ops.mo b/test/run/float-ops.mo
index 71cd2793557..3819d498b78 100644
--- a/test/run/float-ops.mo
+++ b/test/run/float-ops.mo
@@ -29,8 +29,9 @@ assert (not isNegative(positiveNaN));
 assert (isNegative(negativeNaN));
 assert (isNegative(negate(positiveNaN)));
 assert (not isNegative(negate(negativeNaN)));
-Prim.debugPrint(debug_show(positiveNaN));
-Prim.debugPrint(debug_show(negativeNaN));
+// Difference between 32-bit and 64-bit version. 64-bit is even more accurate (with correct sign bit on `nan`).
+// Prim.debugPrint(debug_show(positiveNaN));
+// Prim.debugPrint(debug_show(negativeNaN));
 
 assert (Prim.floatAbs(9.7) == 9.7);
 assert (Prim.floatAbs(-9.7) == 9.7);
diff --git a/test/run/idl-ops.mo b/test/run/idl-ops.mo
index ebcb10a4f0c..0650d2e373a 100644
--- a/test/run/idl-ops.mo
+++ b/test/run/idl-ops.mo
@@ -50,7 +50,7 @@ assert((?arrayNat) == deserArrayInt (serArrayInt arrayNat));
 assert(null == deserArrayNat (serArrayInt arrayInt));
 assert((?arrayInt) == deserArrayInt (serArrayInt arrayInt));
 let heapDifference = Prim.rts_heap_size() : Int - started_with;
-assert(heapDifference <= 40_000);
+assert(heapDifference <= 50_000);
 
 //SKIP run
 //SKIP run-ir
diff --git a/test/run/idl.mo b/test/run/idl.mo
index 83ad7aae6ab..5e9d777999c 100644
--- a/test/run/idl.mo
+++ b/test/run/idl.mo
@@ -45,7 +45,9 @@ assert(arrayNat == deserArrayInt (serArrayNat arrayNat));
 assert(arrayNat == deserArrayInt (serArrayInt arrayNat));
 assert(arrayInt == deserArrayInt (serArrayInt arrayInt));
 let heapDifference = Prim.rts_heap_size() : Int - started_with;
-assert(heapDifference == 5_388);
+
+// Difference between enhanced orthogonal persistence and classical persistence (with and without incremental GC).
+assert(heapDifference == 2_816 or heapDifference == 4_488 or heapDifference == 4_888);
 
 //SKIP run
 //SKIP run-ir
diff --git a/test/run/if-swap.mo b/test/run/if-swap.mo
deleted file mode 100644
index e68e76f5903..00000000000
--- a/test/run/if-swap.mo
+++ /dev/null
@@ -1,12 +0,0 @@
-func wantSeeSwap(x : Bool) : Nat =
-    if (not x) 42 else 25;
-
-ignore(wantSeeSwap(true));
-
-// FHECK-LABEL: (func $wantSeeSwap
-// FHECK-NEXT: local.get $x
-// FHECK-NEXT: if (result i32)
-// FHECK-NEXT: i32.const 50
-// FHECK-NEXT: else
-// FHECK-NEXT: i32.const 84
-
diff --git a/test/run/inc-oom.mo b/test/run/inc-oom.mo
deleted file mode 100644
index 0af9c152ea2..00000000000
--- a/test/run/inc-oom.mo
+++ /dev/null
@@ -1,18 +0,0 @@
-// test incremental oom by allocating 5 GB, one GB at a time
-import P "mo:⛔";
-do {
-
-  var c = 5;
-
-  while(c > 0) {
-    let a : [var Nat8] = P.Array_init(1024*1024*1024/4, 0xFF);
-    c -= 1;
-  };
-
-}
-
-//SKIP run
-//SKIP run-low
-//SKIP run-ir
-
-
diff --git a/test/run/issue1356.mo b/test/run/issue1356-classical.mo
similarity index 98%
rename from test/run/issue1356.mo
rename to test/run/issue1356-classical.mo
index 1e03d16e41b..334be5dc1af 100644
--- a/test/run/issue1356.mo
+++ b/test/run/issue1356-classical.mo
@@ -1,3 +1,4 @@
+//CLASSICAL-PERSISTENCE-ONLY
 // FHECK-LABEL: (func $init
 
 func matchNat(n : Nat) : Bool =
diff --git a/test/run/issue1356-enhanced.mo b/test/run/issue1356-enhanced.mo
new file mode 100644
index 00000000000..70c75b4f036
--- /dev/null
+++ b/test/run/issue1356-enhanced.mo
@@ -0,0 +1,39 @@
+//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+// CHECK-LABEL: (func $init
+
+func matchNat(n : Nat) : Bool =
+         switch n { case 1073741823 true
+                  ; case _ false };
+// CHECK-LABEL: (func $matchNat
+// CHECK:        local.get $switch_in
+// CHECK-NEXT:   i64.const 4294967294
+// CHECK-NEXT:   call $B_eq
+
+func matchInt(n : Int) : Bool =
+         switch n { case (-1073741824) true
+                  ; case 1073741823 true
+                  ; case _ false };
+// CHECK-LABEL: (func $matchInt
+// CHECK:        local.get $switch_in
+// CHECK-NEXT:   i64.const -4294967294
+// CHECK-NEXT:   $B_eq
+// CHECK:        local.get $switch_in
+// CHECK-NEXT:   i64.const 4294967294
+// CHECK-NEXT:   $B_eq
+
+func match8(n : Nat8) : Bool = switch n { case 42 true; case _ false };
+// CHECK-LABEL: (func $match8
+// CHECK:        i64.const 3044433348102455296
+// CHECK-NEXT:   i64.eq
+
+func match16(n : Nat16) : Bool = switch n { case 42 true; case _ false };
+// CHECK-LABEL: (func $match16
+// CHECK:        i64.const 11892317766025216
+// CHECK-NEXT:   i64.eq
+
+// NB: reverse order, so that things appear in order
+assert (match16(42));
+assert (match8(42));
+assert (matchInt(1073741823));
+assert (matchInt(-1073741824));
+assert (matchNat(1073741823));
diff --git a/test/run/leb-random.mo b/test/run/leb-random.mo
new file mode 100644
index 00000000000..0c37e82ddcb
--- /dev/null
+++ b/test/run/leb-random.mo
@@ -0,0 +1,44 @@
+object Random {
+    let max = 0xF_FFFF_FFFF_FFFF_FFFF;
+    let seed = 4711;
+    var number = seed;
+
+    public func nextInt() : Int {
+        let number = +nextNat();
+        let sign = if (number % 2 == 0) { -1 } else { +1 };
+        number * sign;
+    };
+
+    public func nextNat() : Nat {
+        number := (123138118391 * number + 133489131) % 9_999_999;
+        (number * number) % max;
+    };
+};
+
+func serializeInt(a : [Int]) : Blob = to_candid (a);
+func deserialzeInt(b : Blob) : ?[Int] = from_candid (b);
+
+func serializeNat(a : [Nat]) : Blob = to_candid (a);
+func deserialzeNat(b : Blob) : ?[Nat] = from_candid (b);
+
+let rounds = 100000;
+
+var count = 0;
+while (count < rounds) {
+    let number = Random.nextInt();
+    assert ((?[number]) == deserialzeInt(serializeInt([number])));
+    count += 1;
+};
+
+
+count := 0;
+while (count < rounds) {
+    let number = Random.nextNat();
+    assert ((?[number]) == deserialzeNat(serializeNat([number])));
+    count += 1;
+};
+
+
+//SKIP run
+//SKIP run-ir
+//SKIP run-low
diff --git a/test/run/ok/compact-float.run-ir.ok b/test/run/ok/compact-float.run-ir.ok
new file mode 100644
index 00000000000..ca791e80734
--- /dev/null
+++ b/test/run/ok/compact-float.run-ir.ok
@@ -0,0 +1,10 @@
++1_073_741_823
++1_073_741_822
++1_073_741_824
+-1_073_741_824
+-1_073_741_825
+-1_073_741_823
++4_611_686_018_427_387_392
++4_611_686_018_427_387_904
+-4_611_686_018_427_387_392
+-4_611_686_018_427_387_904
diff --git a/test/run/ok/compact-float.run-low.ok b/test/run/ok/compact-float.run-low.ok
new file mode 100644
index 00000000000..ca791e80734
--- /dev/null
+++ b/test/run/ok/compact-float.run-low.ok
@@ -0,0 +1,10 @@
++1_073_741_823
++1_073_741_822
++1_073_741_824
+-1_073_741_824
+-1_073_741_825
+-1_073_741_823
++4_611_686_018_427_387_392
++4_611_686_018_427_387_904
+-4_611_686_018_427_387_392
+-4_611_686_018_427_387_904
diff --git a/test/run/ok/compact-float.run.ok b/test/run/ok/compact-float.run.ok
new file mode 100644
index 00000000000..ca791e80734
--- /dev/null
+++ b/test/run/ok/compact-float.run.ok
@@ -0,0 +1,10 @@
++1_073_741_823
++1_073_741_822
++1_073_741_824
+-1_073_741_824
+-1_073_741_825
+-1_073_741_823
++4_611_686_018_427_387_392
++4_611_686_018_427_387_904
+-4_611_686_018_427_387_392
+-4_611_686_018_427_387_904
diff --git a/test/run/ok/compact-float.wasm-run.ok b/test/run/ok/compact-float.wasm-run.ok
new file mode 100644
index 00000000000..ca791e80734
--- /dev/null
+++ b/test/run/ok/compact-float.wasm-run.ok
@@ -0,0 +1,10 @@
++1_073_741_823
++1_073_741_822
++1_073_741_824
+-1_073_741_824
+-1_073_741_825
+-1_073_741_823
++4_611_686_018_427_387_392
++4_611_686_018_427_387_904
+-4_611_686_018_427_387_392
+-4_611_686_018_427_387_904
diff --git a/test/run/ok/float-fmt.run-ir.ok b/test/run/ok/float-fmt-classical.run-ir.ok
similarity index 100%
rename from test/run/ok/float-fmt.run-ir.ok
rename to test/run/ok/float-fmt-classical.run-ir.ok
diff --git a/test/run/ok/float-fmt.run-low.ok b/test/run/ok/float-fmt-classical.run-low.ok
similarity index 100%
rename from test/run/ok/float-fmt.run-low.ok
rename to test/run/ok/float-fmt-classical.run-low.ok
diff --git a/test/run/ok/float-fmt.run.ok b/test/run/ok/float-fmt-classical.run.ok
similarity index 100%
rename from test/run/ok/float-fmt.run.ok
rename to test/run/ok/float-fmt-classical.run.ok
diff --git a/test/run/ok/float-fmt.wasm-run.ok b/test/run/ok/float-fmt-classical.wasm-run.ok
similarity index 100%
rename from test/run/ok/float-fmt.wasm-run.ok
rename to test/run/ok/float-fmt-classical.wasm-run.ok
diff --git a/test/run/ok/float-fmt-eop.run-ir.ok b/test/run/ok/float-fmt-eop.run-ir.ok
new file mode 100644
index 00000000000..36f28f6951f
--- /dev/null
+++ b/test/run/ok/float-fmt-eop.run-ir.ok
@@ -0,0 +1,24 @@
+fixed Float
+3
+3.1
+3.1416
+3.141592654
+3.1415926535897931
+3.14159265358979312
+3.14159265358979311600
+exponential Float
+3e+00
+3.1e+00
+3.1416e+00
+3.141592654e+00
+3.1415926535897931e+00
+3.14159265358979312e+00
+3.14159265358979311600e+00
+generic Float
+3
+3
+3.142
+3.14159265
+3.141592653589793
+3.1415926535897931
+3.141592653589793116
diff --git a/test/run/ok/float-fmt-eop.run-low.ok b/test/run/ok/float-fmt-eop.run-low.ok
new file mode 100644
index 00000000000..36f28f6951f
--- /dev/null
+++ b/test/run/ok/float-fmt-eop.run-low.ok
@@ -0,0 +1,24 @@
+fixed Float
+3
+3.1
+3.1416
+3.141592654
+3.1415926535897931
+3.14159265358979312
+3.14159265358979311600
+exponential Float
+3e+00
+3.1e+00
+3.1416e+00
+3.141592654e+00
+3.1415926535897931e+00
+3.14159265358979312e+00
+3.14159265358979311600e+00
+generic Float
+3
+3
+3.142
+3.14159265
+3.141592653589793
+3.1415926535897931
+3.141592653589793116
diff --git a/test/run/ok/float-fmt-eop.run.ok b/test/run/ok/float-fmt-eop.run.ok
new file mode 100644
index 00000000000..36f28f6951f
--- /dev/null
+++ b/test/run/ok/float-fmt-eop.run.ok
@@ -0,0 +1,24 @@
+fixed Float
+3
+3.1
+3.1416
+3.141592654
+3.1415926535897931
+3.14159265358979312
+3.14159265358979311600
+exponential Float
+3e+00
+3.1e+00
+3.1416e+00
+3.141592654e+00
+3.1415926535897931e+00
+3.14159265358979312e+00
+3.14159265358979311600e+00
+generic Float
+3
+3
+3.142
+3.14159265
+3.141592653589793
+3.1415926535897931
+3.141592653589793116
diff --git a/test/run/ok/float-fmt-eop.wasm-run.ok b/test/run/ok/float-fmt-eop.wasm-run.ok
new file mode 100644
index 00000000000..35b1b5e1dcc
--- /dev/null
+++ b/test/run/ok/float-fmt-eop.wasm-run.ok
@@ -0,0 +1,24 @@
+fixed Float
+3
+3.1
+3.1416
+3.141592654
+3.1415926535897931
+3.14159265358979312
+3.14159265358979311600
+exponential Float
+3e0
+3.1e0
+3.1416e0
+3.141592654e0
+3.1415926535897931e0
+3.14159265358979312e0
+3.14159265358979311600e0
+generic Float
+3
+3.1
+3.1416
+3.141592654
+3.1415926535897931
+3.14159265358979312
+3.14159265358979311600
diff --git a/test/run/ok/float-ops.run-ir.ok b/test/run/ok/float-ops.run-ir.ok
index 72525491b63..743650b9509 100644
--- a/test/run/ok/float-ops.run-ir.ok
+++ b/test/run/ok/float-ops.run-ir.ok
@@ -1,7 +1,5 @@
 0
 -0
-nan:0x8_0000_0000_0000
--nan:0x8_0000_0000_0000
 2.099_999_999_999_999_6
 -2.099_999_999_999_999_6
 2.099_999_999_999_999_6
diff --git a/test/run/ok/float-ops.run-low.ok b/test/run/ok/float-ops.run-low.ok
index 72525491b63..743650b9509 100644
--- a/test/run/ok/float-ops.run-low.ok
+++ b/test/run/ok/float-ops.run-low.ok
@@ -1,7 +1,5 @@
 0
 -0
-nan:0x8_0000_0000_0000
--nan:0x8_0000_0000_0000
 2.099_999_999_999_999_6
 -2.099_999_999_999_999_6
 2.099_999_999_999_999_6
diff --git a/test/run/ok/float-ops.run.ok b/test/run/ok/float-ops.run.ok
index 72525491b63..743650b9509 100644
--- a/test/run/ok/float-ops.run.ok
+++ b/test/run/ok/float-ops.run.ok
@@ -1,7 +1,5 @@
 0
 -0
-nan:0x8_0000_0000_0000
--nan:0x8_0000_0000_0000
 2.099_999_999_999_999_6
 -2.099_999_999_999_999_6
 2.099_999_999_999_999_6
diff --git a/test/run/ok/float-ops.wasm-run.ok b/test/run/ok/float-ops.wasm-run.ok
index b494bc41734..5fd0adfa58d 100644
--- a/test/run/ok/float-ops.wasm-run.ok
+++ b/test/run/ok/float-ops.wasm-run.ok
@@ -1,7 +1,5 @@
 0.000000
 -0.000000
-nan
--nan
 2.100000
 -2.100000
 2.100000
diff --git a/test/run/ok/inc-oom.wasm-run.ok b/test/run/ok/inc-oom.wasm-run.ok
deleted file mode 100644
index 9187596c5be..00000000000
--- a/test/run/ok/inc-oom.wasm-run.ok
+++ /dev/null
@@ -1 +0,0 @@
-RTS error: Cannot grow memory
diff --git a/test/run/ok/optimise-for-array.run-ir.ok b/test/run/ok/optimise-for-array-classical.run-ir.ok
similarity index 100%
rename from test/run/ok/optimise-for-array.run-ir.ok
rename to test/run/ok/optimise-for-array-classical.run-ir.ok
diff --git a/test/run/ok/optimise-for-array.run-low.ok b/test/run/ok/optimise-for-array-classical.run-low.ok
similarity index 100%
rename from test/run/ok/optimise-for-array.run-low.ok
rename to test/run/ok/optimise-for-array-classical.run-low.ok
diff --git a/test/run/ok/optimise-for-array.run.ok b/test/run/ok/optimise-for-array-classical.run.ok
similarity index 100%
rename from test/run/ok/optimise-for-array.run.ok
rename to test/run/ok/optimise-for-array-classical.run.ok
diff --git a/test/run/ok/optimise-for-array-classical.tc.ok b/test/run/ok/optimise-for-array-classical.tc.ok
new file mode 100644
index 00000000000..76302a414fd
--- /dev/null
+++ b/test/run/ok/optimise-for-array-classical.tc.ok
@@ -0,0 +1 @@
+optimise-for-array-classical.mo:124.6-124.8: warning [M0194], unused identifier f9 (delete or rename to wildcard `_` or `_f9`)
diff --git a/test/run/ok/optimise-for-array.wasm-run.ok b/test/run/ok/optimise-for-array-classical.wasm-run.ok
similarity index 100%
rename from test/run/ok/optimise-for-array.wasm-run.ok
rename to test/run/ok/optimise-for-array-classical.wasm-run.ok
diff --git a/test/run/ok/optimise-for-array-enhanced.run-ir.ok b/test/run/ok/optimise-for-array-enhanced.run-ir.ok
new file mode 100644
index 00000000000..7a4990793f9
--- /dev/null
+++ b/test/run/ok/optimise-for-array-enhanced.run-ir.ok
@@ -0,0 +1,12 @@
+hello
+world
+hello
+mutable
+world
+hello
+mutable
+world
+hello
+immutable
+world
+want to see you
diff --git a/test/run/ok/optimise-for-array-enhanced.run-low.ok b/test/run/ok/optimise-for-array-enhanced.run-low.ok
new file mode 100644
index 00000000000..7a4990793f9
--- /dev/null
+++ b/test/run/ok/optimise-for-array-enhanced.run-low.ok
@@ -0,0 +1,12 @@
+hello
+world
+hello
+mutable
+world
+hello
+mutable
+world
+hello
+immutable
+world
+want to see you
diff --git a/test/run/ok/optimise-for-array-enhanced.run.ok b/test/run/ok/optimise-for-array-enhanced.run.ok
new file mode 100644
index 00000000000..7a4990793f9
--- /dev/null
+++ b/test/run/ok/optimise-for-array-enhanced.run.ok
@@ -0,0 +1,12 @@
+hello
+world
+hello
+mutable
+world
+hello
+mutable
+world
+hello
+immutable
+world
+want to see you
diff --git a/test/run/ok/optimise-for-array-enhanced.tc.ok b/test/run/ok/optimise-for-array-enhanced.tc.ok
new file mode 100644
index 00000000000..2bf3ae6232f
--- /dev/null
+++ b/test/run/ok/optimise-for-array-enhanced.tc.ok
@@ -0,0 +1 @@
+optimise-for-array-enhanced.mo:124.6-124.8: warning [M0194], unused identifier f9 (delete or rename to wildcard `_` or `_f9`)
diff --git a/test/run/ok/optimise-for-array-enhanced.wasm-run.ok b/test/run/ok/optimise-for-array-enhanced.wasm-run.ok
new file mode 100644
index 00000000000..7a4990793f9
--- /dev/null
+++ b/test/run/ok/optimise-for-array-enhanced.wasm-run.ok
@@ -0,0 +1,12 @@
+hello
+world
+hello
+mutable
+world
+hello
+mutable
+world
+hello
+immutable
+world
+want to see you
diff --git a/test/run/ok/optimise-for-array.tc.ok b/test/run/ok/optimise-for-array.tc.ok
deleted file mode 100644
index 837540a7c42..00000000000
--- a/test/run/ok/optimise-for-array.tc.ok
+++ /dev/null
@@ -1 +0,0 @@
-optimise-for-array.mo:123.6-123.8: warning [M0194], unused identifier f9 (delete or rename to wildcard `_` or `_f9`)
diff --git a/test/run/ok/stable-memory-beyond-4GB.tc.ok b/test/run/ok/stable-memory-beyond-4GB.tc.ok
new file mode 100644
index 00000000000..fa980983c7a
--- /dev/null
+++ b/test/run/ok/stable-memory-beyond-4GB.tc.ok
@@ -0,0 +1,2 @@
+stable-memory-beyond-4GB.mo:9.36-9.57: warning [M0199], this code is (or uses) the deprecated library `ExperimentalStableMemory`.
+Please use the `Region` library instead: https://internetcomputer.org/docs/current/motoko/main/stable-memory/stable-regions/#the-region-library or compile with flag `--experimental-stable-memory 1` to suppress this message.
diff --git a/test/run/ok/stable-memory-beyond-4GB.wasm-run.ok b/test/run/ok/stable-memory-beyond-4GB.wasm-run.ok
new file mode 100644
index 00000000000..6840afd1af6
--- /dev/null
+++ b/test/run/ok/stable-memory-beyond-4GB.wasm-run.ok
@@ -0,0 +1,4 @@
+{size = 0}
+{size = 65_537}
+{read = 0}
+{read = 123}
diff --git a/test/run/ok/stable-region-beyond-4GB.tc.ok b/test/run/ok/stable-region-beyond-4GB.tc.ok
new file mode 100644
index 00000000000..3d1aa3648a9
--- /dev/null
+++ b/test/run/ok/stable-region-beyond-4GB.tc.ok
@@ -0,0 +1,2 @@
+stable-region-beyond-4GB.mo:9.36-9.57: warning [M0199], this code is (or uses) the deprecated library `ExperimentalStableMemory`.
+Please use the `Region` library instead: https://internetcomputer.org/docs/current/motoko/main/stable-memory/stable-regions/#the-region-library or compile with flag `--experimental-stable-memory 1` to suppress this message.
diff --git a/test/run/ok/stable-region-beyond-4GB.wasm-run.ok b/test/run/ok/stable-region-beyond-4GB.wasm-run.ok
new file mode 100644
index 00000000000..6840afd1af6
--- /dev/null
+++ b/test/run/ok/stable-region-beyond-4GB.wasm-run.ok
@@ -0,0 +1,4 @@
+{size = 0}
+{size = 65_537}
+{read = 0}
+{read = 123}
diff --git a/test/run/ok/words.run-ir.ok b/test/run/ok/words-classical.run-ir.ok
similarity index 100%
rename from test/run/ok/words.run-ir.ok
rename to test/run/ok/words-classical.run-ir.ok
diff --git a/test/run/ok/words.run-low.ok b/test/run/ok/words-classical.run-low.ok
similarity index 100%
rename from test/run/ok/words.run-low.ok
rename to test/run/ok/words-classical.run-low.ok
diff --git a/test/run/ok/words.run.ok b/test/run/ok/words-classical.run.ok
similarity index 100%
rename from test/run/ok/words.run.ok
rename to test/run/ok/words-classical.run.ok
diff --git a/test/run/ok/words.wasm-run.ok b/test/run/ok/words-classical.wasm-run.ok
similarity index 100%
rename from test/run/ok/words.wasm-run.ok
rename to test/run/ok/words-classical.wasm-run.ok
diff --git a/test/run/ok/words-enhanced.run-ir.ok b/test/run/ok/words-enhanced.run-ir.ok
new file mode 100644
index 00000000000..dea7e52cf7a
--- /dev/null
+++ b/test/run/ok/words-enhanced.run-ir.ok
@@ -0,0 +1,94 @@
+8_912_765
+18_446_744_073_700_638_850
+8_917_332
+8_908_198
+31_969
+652
+2_548
+20_857_489
+4_437
+8_912_895
+8_908_458
+584_576
+35
+-326_582_449_863_721_025
+-2
+-326_582_449_863_721_025
+1_140_833_920
+18_014_398_509_482_053_630
++61
+49
+5
+set
+clear
+set
+8_912_765
+4_286_054_530
+8_917_332
+8_908_198
+31_969
+652
+2_548
+20_857_489
+4_437
+8_912_895
+8_908_458
+584_576
+35
+-76_038_403
+-2
+-76_038_403
+1_140_833_920
+4_194_373_630
++29
+17
+5
+set
+clear
+set
+55_734
+9_801
+60_301
+51_167
+31_969
+652
+930
+17_041
+4_502
+55_799
+51_297
+60_288
+35
+8_190
+-2
+56_172
+28_083
++13
+1
+5
+set
+clear
+set
+34
+221
+101
+223
+213
+9
+34
+137
+2
+99
+97
+128
+0
+30
+-2
+17
+68
++5
+0
+3
+set
+clear
+set
diff --git a/test/run/ok/words-enhanced.run-low.ok b/test/run/ok/words-enhanced.run-low.ok
new file mode 100644
index 00000000000..dea7e52cf7a
--- /dev/null
+++ b/test/run/ok/words-enhanced.run-low.ok
@@ -0,0 +1,94 @@
+8_912_765
+18_446_744_073_700_638_850
+8_917_332
+8_908_198
+31_969
+652
+2_548
+20_857_489
+4_437
+8_912_895
+8_908_458
+584_576
+35
+-326_582_449_863_721_025
+-2
+-326_582_449_863_721_025
+1_140_833_920
+18_014_398_509_482_053_630
++61
+49
+5
+set
+clear
+set
+8_912_765
+4_286_054_530
+8_917_332
+8_908_198
+31_969
+652
+2_548
+20_857_489
+4_437
+8_912_895
+8_908_458
+584_576
+35
+-76_038_403
+-2
+-76_038_403
+1_140_833_920
+4_194_373_630
++29
+17
+5
+set
+clear
+set
+55_734
+9_801
+60_301
+51_167
+31_969
+652
+930
+17_041
+4_502
+55_799
+51_297
+60_288
+35
+8_190
+-2
+56_172
+28_083
++13
+1
+5
+set
+clear
+set
+34
+221
+101
+223
+213
+9
+34
+137
+2
+99
+97
+128
+0
+30
+-2
+17
+68
++5
+0
+3
+set
+clear
+set
diff --git a/test/run/ok/words-enhanced.run.ok b/test/run/ok/words-enhanced.run.ok
new file mode 100644
index 00000000000..dea7e52cf7a
--- /dev/null
+++ b/test/run/ok/words-enhanced.run.ok
@@ -0,0 +1,94 @@
+8_912_765
+18_446_744_073_700_638_850
+8_917_332
+8_908_198
+31_969
+652
+2_548
+20_857_489
+4_437
+8_912_895
+8_908_458
+584_576
+35
+-326_582_449_863_721_025
+-2
+-326_582_449_863_721_025
+1_140_833_920
+18_014_398_509_482_053_630
++61
+49
+5
+set
+clear
+set
+8_912_765
+4_286_054_530
+8_917_332
+8_908_198
+31_969
+652
+2_548
+20_857_489
+4_437
+8_912_895
+8_908_458
+584_576
+35
+-76_038_403
+-2
+-76_038_403
+1_140_833_920
+4_194_373_630
++29
+17
+5
+set
+clear
+set
+55_734
+9_801
+60_301
+51_167
+31_969
+652
+930
+17_041
+4_502
+55_799
+51_297
+60_288
+35
+8_190
+-2
+56_172
+28_083
++13
+1
+5
+set
+clear
+set
+34
+221
+101
+223
+213
+9
+34
+137
+2
+99
+97
+128
+0
+30
+-2
+17
+68
++5
+0
+3
+set
+clear
+set
diff --git a/test/run/ok/words-enhanced.wasm-run.ok b/test/run/ok/words-enhanced.wasm-run.ok
new file mode 100644
index 00000000000..dea7e52cf7a
--- /dev/null
+++ b/test/run/ok/words-enhanced.wasm-run.ok
@@ -0,0 +1,94 @@
+8_912_765
+18_446_744_073_700_638_850
+8_917_332
+8_908_198
+31_969
+652
+2_548
+20_857_489
+4_437
+8_912_895
+8_908_458
+584_576
+35
+-326_582_449_863_721_025
+-2
+-326_582_449_863_721_025
+1_140_833_920
+18_014_398_509_482_053_630
++61
+49
+5
+set
+clear
+set
+8_912_765
+4_286_054_530
+8_917_332
+8_908_198
+31_969
+652
+2_548
+20_857_489
+4_437
+8_912_895
+8_908_458
+584_576
+35
+-76_038_403
+-2
+-76_038_403
+1_140_833_920
+4_194_373_630
++29
+17
+5
+set
+clear
+set
+55_734
+9_801
+60_301
+51_167
+31_969
+652
+930
+17_041
+4_502
+55_799
+51_297
+60_288
+35
+8_190
+-2
+56_172
+28_083
++13
+1
+5
+set
+clear
+set
+34
+221
+101
+223
+213
+9
+34
+137
+2
+99
+97
+128
+0
+30
+-2
+17
+68
++5
+0
+3
+set
+clear
+set
diff --git a/test/run/optimise-for-array.mo b/test/run/optimise-for-array-classical.mo
similarity index 90%
rename from test/run/optimise-for-array.mo
rename to test/run/optimise-for-array-classical.mo
index 45f46d115fb..703071a5a9b 100644
--- a/test/run/optimise-for-array.mo
+++ b/test/run/optimise-for-array-classical.mo
@@ -1,3 +1,4 @@
+//CLASSICAL-PERSISTENCE-ONLY
 //MOC-FLAG -fshared-code
 import Prim "mo:⛔";
 
@@ -6,13 +7,13 @@ import Prim "mo:⛔";
 // FHECK: (local $check0 i32)
 
 // FHECK-NOT:  call $@immut_array_size
-// DON'TFHECK: i32.load offset=9
+// DON'TFHECK: i32.load offset=(5 or 9) 
 // FHECK:      i32.load offset= 
 // FHECK:      i32.const 2
 // FHECK:      i32.shl
 // FHECK:      i32.lt_u
 // FHECK:      i32.add
-// DON'TFHECK: i32.load offset=13
+// DON'TFHECK: i32.load offset=(9 or 13)
 // FHECK:      local.tee $check0
 // FHECK-NEXT: call $print_text
 // FHECK:      i32.const 4
@@ -21,13 +22,13 @@ for (check0 in ["hello", "world"].vals()) { Prim.debugPrint check0 };
 
 
 // FHECK-NOT:  call $@mut_array_size
-// DON'TFHECK: i32.load offset=9
+// DON'TFHECK: i32.load offset=(5 or 9)
 // FHECK:      i32.load offset=
 // FHECK:      i32.const 2
 // FHECK-NEXT: i32.shl
 // FHECK:      i32.lt_u
 // FHECK:      i32.add
-// DON'TFHECK: i32.load offset=13
+// DON'TFHECK: i32.load offset=(9 or 13)
 // FHECK:      i32.load offset=
 // FHECK-NEXT: local.tee $check1
 // FHECK-NEXT: call $print_text
@@ -36,7 +37,7 @@ for (check1 in [var "hello", "mutable", "world"].vals()) { Prim.debugPrint check
 let array = [var "hello", "remutable", "world"];
 array[1] := "mutable";
 // FHECK-NOT:   call $@immut_array_size
-// DON'TFHECK:  i32.load offset=9
+// DON'TFHECK:  i32.load offset=(5 or 9)
 // FHECK:       i32.load offset=
 // FHECK:       i32.const 2
 // FHECK:       i32.shl
@@ -48,13 +49,13 @@ array[1] := "mutable";
 for (check2 in array.vals()) { Prim.debugPrint check2 };
 
 // FHECK-NOT:  call $@immut_array_size
-// DON'TFHECK: i32.load offset=9
+// DON'TFHECK: i32.load offset=(5 or 9)
 // FHECK:      i32.load offset=
 // FHECK:      i32.const 2
 // FHECK:      i32.shl
 // FHECK:      i32.lt_u
 // FHECK:      i32.add
-// DON'TFHECK: i32.load offset=13
+// DON'TFHECK: i32.load offset=(9 or 13)
 // FHECK:      i32.load offset=
 // FHECK-NEXT: local.tee $check3
 // interfering parentheses don't disturb us
@@ -95,7 +96,7 @@ check6[1] := "mutable";
 // this passes the IR type check, which demonstrates that no name capture happens
 for (check6 in check6.vals()) { ignore check6 };
 
-// DON'TFHECK: i32.load offset=9
+// DON'TFHECK: i32.load offset=(5 or 9)
 // FHECK:      i32.load offset=
 // FHECK:      i32.const 2
 // FHECK:      i32.shl
@@ -104,7 +105,7 @@ for (check7 in [].vals(Prim.debugPrint "want to see you")) { };
 
 // FHECK:      local.set $num8
 // FHECK-NOT:  call $@immut_array_size
-// DON'TFHECK: i32.load offset=9
+// DON'TFHECK: i32.load offset=(5 or 9)
 // FHECK:      i32.load offset=
 // FHECK:      i32.const 1
 // FHECK:      i32.shl
diff --git a/test/run/optimise-for-array-enhanced.mo b/test/run/optimise-for-array-enhanced.mo
new file mode 100644
index 00000000000..e7ef94a27d8
--- /dev/null
+++ b/test/run/optimise-for-array-enhanced.mo
@@ -0,0 +1,131 @@
+//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+//MOC-FLAG -fshared-code
+import Prim "mo:⛔";
+
+// CHECK: (local $check0 i64)
+
+// CHECK-NOT:  call $@immut_array_size
+// DON'TCHECK: i64.load offset=17
+// CHECK:      i64.load offset= 
+// CHECK:      i64.const 2
+// CHECK:      i64.shr_s
+// CHECK-NEXT: i64.const 3
+// CHECK-NEXT: i64.shl
+// CHECK-NEXT: i64.add
+// CHECK:      local.tee $check0
+// CHECK:      i64.const 4
+// CHECK:      i64.add
+for (check0 in ["hello", "world"].vals()) { Prim.debugPrint check0 };
+
+
+// CHECK-NOT:  call $@mut_array_size
+// DON'TCHECK: i64.load offset=17
+// FIX-CHECK:      i64.const 2
+// FIX-CHECK:      i64.shr_s
+// FIX-CHECK:      i64.const 3
+// FIX-CHECK:      i64.shl
+// FIX-CHECK:      i64.add
+// DON'TCHECK: i64.load offset=25
+// CHECK:      i64.load offset=
+// CHECK:      local.tee $check1
+// CHECK:      call $print_ptr
+// CHECK:      i64.const 4
+// CHECK:      i64.add
+for (check1 in [var "hello", "mutable", "world"].vals()) { Prim.debugPrint check1 };
+
+let array = [var "hello", "remutable", "world"];
+array[1] := "mutable";
+// FIX-CHECK-NOT:   call $@immut_array_size
+// DON'TCHECK:  i64.load offset=17
+// FIX-CHECK:       i64.load offset=
+// FIX-CHECK:       i64.const 2
+// FIX-CHECK:       i64.shr_s
+// DON'T-CHECK: i64.lt_u
+// DON'T-CHECK: local.get $array
+// DON'T-CHECK: local.set $check2
+// `arr` being a `VarE` already (but we rebind anyway, otherwise we open a can of worms)
+// later when we have path compression for variables in the backend, we can bring this back
+for (check2 in array.vals()) { Prim.debugPrint check2 };
+
+// FIX-CHECK-NOT:  call $@immut_array_size
+// DON'TCHECK: i64.load offset=17
+// FIX-CHECK:      i64.load offset=
+// FIX-CHECK:      i64.const 2
+// FIX-CHECK-NEXT: i64.shr_s
+// FIX-CHECK:      i64.lt_u
+// FIX-CHECK:      i64.add
+// DON'TCHECK: i64.load offset=25
+// FIX-CHECK:      local.tee $check3
+// interfering parentheses don't disturb us
+for (check3 in (((["hello", "immutable", "world"].vals())))) { Prim.debugPrint check3 };
+
+
+// FIX-CHECK:      i64.const 170
+// FIX-CHECK:      call $B_add
+// FIX-CHECK-NEXT: call $B_eq
+// FIX-CHECK-NEXT: i32.wrap_i64
+// FIX-CHECK-NEXT: if
+// FIX-CHECK-NEXT: loop
+// FIX-CHECK-NEXT: br 0
+// FIX-CHECK-NEXT: end
+// FIX-CHECK-NEXT: unreachable
+// FIX-CHECK-NEXT: else
+// bottom iteration expression is treated fairly
+var c = 42;
+if (c == c + 1) {
+    for (check4 in (loop {}).vals()) { Prim.debugPrint check4 }
+};
+
+// FIX-CHECK:      call $B_add
+// FIX-CHECK-NEXT: call $B_eq
+// FIX-CHECK-NEXT: i32.wrap_i64
+// FIX-CHECK-NEXT: if
+// FIX-CHECK-NEXT: loop
+// FIX-CHECK-NEXT: br 0
+// FIX-CHECK-NEXT: end
+// FIX-CHECK-NEXT: unreachable
+// FIX-CHECK-NEXT: else
+// typed bottom iteration expression is treated fairly
+if (c == c + 1) {
+    for (check5 in ((loop {}) : [Text]).vals()) { Prim.debugPrint check5 }
+};
+
+let check6 = [var "hello", "immutable", "world"];
+check6[1] := "mutable";
+// `check6` being a `VarE` already and iteration variable is named identically
+// this passes the IR type check, which demonstrates that no name capture happens
+for (check6 in check6.vals()) { ignore check6 };
+
+// DON'TCHECK: i64.load offset=17
+// FIX-CHECK:      i64.load offset=
+// FIX-CHECK:      i64.const 3
+// FIX-CHECK:      i64.shl
+// argument to vals can have an effect too, expect it
+for (check7 in [].vals(Prim.debugPrint "want to see you")) { };
+
+// FIX-CHECK:      local.set $num8
+// FIX-CHECK-NOT:  call $@immut_array_size
+// DON'TCHECK: i64.load offset=17
+// FIX-CHECK:      i64.load offset=
+// FIX-CHECK:      i64.const 2
+// FIX-CHECK:      i64.shr_s
+// FIX-CHECK:      i64.lt_u
+// FIX-CHECK-NOT:  i64.add
+// FIX-CHECK:      local.tee $check8
+// FIX-CHECK-NEXT: local.get $num8
+// FIX-CHECK-NEXT: call $B_add
+var num8 = 42;
+num8 := 25;
+// `keys` is even easier to rewrite, as the "indexing expression" is just the
+// indexing variable itself
+for (check8 in ["hello", "keyed", "world"].keys()) { ignore (check8 + num8) };
+
+// polymorphic arrays should still work
+func f9(array : [A]) {
+  for (check9 in array.keys()) { }
+};
+
+// make sure that one-byte-sized elements still work
+var sum10 : Nat8 = 0;
+for (check10 in ([3, 5, 7, 11] : [Nat8]).vals()) { sum10 += check10 };
+assert sum10 == 26
diff --git a/test/run/refuted-const-float.mo b/test/run/refuted-const-float.mo
index aed68a0adac..8e2e839c321 100644
--- a/test/run/refuted-const-float.mo
+++ b/test/run/refuted-const-float.mo
@@ -1,11 +1,5 @@
 // a failing pattern match that can be compiled to a trap
 let 0.67 = 3.14;
 
-// CHECK: (func $init (type
-// CHECK: call $blob_of_principal
-// CHECK: i32.const 14
-// CHECK: call $print_ptr
-// CHECK-NEXT: unreachable)
-
 //SKIP run-low
 //SKIP run-ir
diff --git a/test/run/refuted-const-option-null.mo b/test/run/refuted-const-option-null.mo
index 68f285a6fc8..ccc65afd6c9 100644
--- a/test/run/refuted-const-option-null.mo
+++ b/test/run/refuted-const-option-null.mo
@@ -1,11 +1,5 @@
 // a failing pattern match that can be compiled to a trap
 let ?b = null;
 
-// CHECK: (func $init (type
-// CHECK: call $blob_of_principal
-// CHECK: i32.const 14
-// CHECK: call $print_ptr
-// CHECK-NEXT: unreachable)
-
 //SKIP run-low
 //SKIP run-ir
diff --git a/test/run/refuted-const-option.mo b/test/run/refuted-const-option.mo
index e47f347854f..37e944c1f19 100644
--- a/test/run/refuted-const-option.mo
+++ b/test/run/refuted-const-option.mo
@@ -1,11 +1,5 @@
 // a failing pattern match that can be compiled to a trap
 let null = ?42;
 
-// CHECK: (func $init (type
-// CHECK: call $blob_of_principal
-// CHECK: i32.const 14
-// CHECK: call $print_ptr
-// CHECK-NEXT: unreachable)
-
 //SKIP run-low
 //SKIP run-ir
diff --git a/test/run/refuted-const-variant.mo b/test/run/refuted-const-variant.mo
index 90fc698290f..161d096126c 100644
--- a/test/run/refuted-const-variant.mo
+++ b/test/run/refuted-const-variant.mo
@@ -1,11 +1,5 @@
 // a failing pattern match that can be compiled to a trap
 let (#const b) = #bummer;
 
-// CHECK: (func $init (type
-// CHECK: call $blob_of_principal
-// CHECK: i32.const 14
-// CHECK: call $print_ptr
-// CHECK-NEXT: unreachable)
-
 //SKIP run-low
 //SKIP run-ir
diff --git a/test/run/stable-memory-beyond-4GB.mo b/test/run/stable-memory-beyond-4GB.mo
new file mode 100644
index 00000000000..cd737c8e74c
--- /dev/null
+++ b/test/run/stable-memory-beyond-4GB.mo
@@ -0,0 +1,25 @@
+//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+//MOC-FLAG --max-stable-pages=1000000
+import Prim "mo:prim";
+
+let size: Nat64 = 4 * 1024 * 1024 * 1024 + 1; // 4 GB + 1 byte, i.e. just beyond the 4 GB boundary
+let wasmPageSize: Nat64 = 64 * 1024; // 64 KB
+let numberOfPages: Nat64 = (size + wasmPageSize - 1) / wasmPageSize;
+
+Prim.debugPrint(debug_show {size = Prim.stableMemorySize()});
+let result = Prim.stableMemoryGrow(numberOfPages);
+assert(result == 0);
+Prim.debugPrint(debug_show {size = Prim.stableMemorySize()});
+let location = size - 1;
+let initial = Prim.stableMemoryLoadNat8(location);
+assert(initial == 0);
+Prim.debugPrint(debug_show {read = Prim.stableMemoryLoadNat8(location)});
+let testValue: Nat8 = 123;
+Prim.stableMemoryStoreNat8(location, testValue);
+Prim.debugPrint(debug_show {read = Prim.stableMemoryLoadNat8(location)});
+let actual = Prim.stableMemoryLoadNat8(location);
+assert(actual == testValue);
+
+//SKIP run-low
+//SKIP run
+//SKIP run-ir
diff --git a/test/run/stable-region-beyond-4GB.mo b/test/run/stable-region-beyond-4GB.mo
new file mode 100644
index 00000000000..88dd581d2fa
--- /dev/null
+++ b/test/run/stable-region-beyond-4GB.mo
@@ -0,0 +1,26 @@
+//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+//MOC-FLAG --max-stable-pages=1000000
+import Prim "mo:prim";
+
+let size: Nat64 = 4 * 1024 * 1024 * 1024 + 1; // 4 GB + 1 byte, i.e. just beyond the 4 GB boundary
+let wasmPageSize: Nat64 = 64 * 1024; // 64 KB
+let numberOfPages: Nat64 = (size + wasmPageSize - 1) / wasmPageSize;
+
+Prim.debugPrint(debug_show {size = Prim.stableMemorySize()});
+let r1 = Prim.regionNew();
+let result = Prim.regionGrow(r1, numberOfPages);
+assert(result == 0);
+Prim.debugPrint(debug_show {size = Prim.regionSize(r1)});
+let location = size - 1;
+let initial = Prim.regionLoadNat8(r1, location);
+assert(initial == 0);
+Prim.debugPrint(debug_show {read = Prim.regionLoadNat8(r1, location)});
+let testValue: Nat8 = 123;
+Prim.regionStoreNat8(r1, location, testValue);
+Prim.debugPrint(debug_show {read = Prim.regionLoadNat8(r1, location)});
+let actual = Prim.regionLoadNat8(r1, location);
+assert(actual == testValue);
+
+//SKIP run-low
+//SKIP run
+//SKIP run-ir
diff --git a/test/run/unboxed-let.mo b/test/run/unboxed-let.mo
index 2831ad26d93..3167083c4eb 100644
--- a/test/run/unboxed-let.mo
+++ b/test/run/unboxed-let.mo
@@ -35,69 +35,39 @@ func goFloat() {
 };
 goFloat();
 
-// This just checks that the above CHECK-NOT: box_i32 is up-to-date
-// (If that function gets renamed the test above could yeild false successes
-func goValidNat32(x : Nat32) { assert (x *% x == 4); }; goValidNat32(2);
+// This just checks that the above CHECK-NOT: box_i64 is up-to-date
+// (If that function gets renamed the test above could yield false successes)
 func goValidNat64(x : Nat64) { assert (x *% x == 4); }; goValidNat64(2);
 func goValidFloat(x : Float) { assert (x * x == 4); }; goValidFloat(2);
 
 // We have to do the filechecks in reverse order:
 
-// FHECK: func $goValidFloat
-// FHECK: f64.load
-// FHECK: unreachable
+// CHECK: func $goValidFloat
+// CHECK: f64.load
+// CHECK: unreachable
 
-// FHECK: func $goValidNat64
-// FHECK: unbox_i64
-// FHECK: unreachable
+// CHECK: func $goValidNat64
+// CHECK: unbox64
+// CHECK: unreachable
 
-// FHECK: func $goValidNat32
-// FHECK: unbox_i32
-// FHECK: unreachable
+// CHECK: func $goFloat
+// CHECK-NOT: f64.load
+// CHECK-STORE: f64.load
+// CHECK: unreachable
 
-// FHECK: func $goFloat
-// FHECK-NOT: f64.load
-// FHECK-STORE: f64.load
-// FHECK: unreachable
-
-// FHECK: func $goInt64
-// FHECK-NOT: box_i64
-// FHECK-NOT: unbox_i64
-// FHECK: unreachable
-
-// FHECK: func $goNat64
-// FHECK-NOT: box_i64
-// FHECK-NOT: unbox_i64
-// FHECK: unreachable
-
-// FHECK: func $goInt32
-// FHECK-NOT: box_i32
-// FHECK-NOT: unbox_i32
-// FHECK: unreachable
-
-// FHECK: func $goNat32
-// FHECK-NOT: box_i32
-// FHECK-NOT: unbox_i32
-// FHECK: unreachable
+// CHECK: func $goInt64
+// CHECK-NOT: box64
+// CHECK-NOT: unbox64
+// CHECK: unreachable
 
+// CHECK: func $goNat64
+// CHECK-NOT: box64
+// CHECK-NOT: unbox64
+// CHECK: unreachable
 
 
 // Also test that the value is included in a closure properly
 
-func goCaptureNat32() : () -> () {
- let x : Nat32 = 1 +% 1;
- var y : Nat32 = 1 +% 1;
- return func() { assert(x == 2); assert(y == 2)};
-};
-goCaptureNat32()();
-
-func goCaptureInt32() : () -> () {
- let x : Int32 = 1 +% 1;
- var y : Int32 = 1 +% 1;
- return func() { assert(x == 2); assert(y == 2)};
-};
-goCaptureInt32()();
-
 func goCaptureNat64() : () -> () {
  let x : Nat64 = 1 +% 1;
  var y : Nat64 = 1 +% 1;
diff --git a/test/run/variant-specials.mo b/test/run/variant-specials.mo
index 74d717ec9ea..822f4be805a 100644
--- a/test/run/variant-specials.mo
+++ b/test/run/variant-specials.mo
@@ -15,13 +15,13 @@ func specials(one : { #one : Nat }, two : { #c0; #c1 }) {
 specials(#one 42, #c1)
 
 //CHECK: func $specials
-//CHECK-NOT: i32.const 5544550
+//CHECK-NOT: {{i32|i64}}.const 5544550
 //CHECK: local.set $count
 
-//CHECK: i32.const 22125
+//CHECK: {{i32|i64}}.const 22125
 //CHECK: local.set $count
 
-//CHECK-NOT: i32.const 22126
+//CHECK-NOT: {{i32|i64}}.const 22126
 //CHECK: local.set $count
 
-//CHECK: i32.const 494
+//CHECK: {{i32.const 246|i64.const 494}}
diff --git a/test/run/words.mo b/test/run/words-classical.mo
similarity index 99%
rename from test/run/words.mo
rename to test/run/words-classical.mo
index 15a68eef250..bff3b65ef75 100644
--- a/test/run/words.mo
+++ b/test/run/words-classical.mo
@@ -1,3 +1,4 @@
+//CLASSICAL-PERSISTENCE-ONLY
 //MOC-FLAG -fshared-code
 import Prim "mo:⛔";
 
diff --git a/test/run/words-enhanced.mo b/test/run/words-enhanced.mo
new file mode 100644
index 00000000000..7e828e2da0d
--- /dev/null
+++ b/test/run/words-enhanced.mo
@@ -0,0 +1,340 @@
+//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+//MOC-FLAG -fshared-code
+import Prim "mo:⛔";
+
+// CHECK: func $init
+
+// CHECK: i64.const 424242
+assert not (424242 : Nat64 == 1);
+assert not (3 : Nat64 == 0);
+assert (0 : Nat64 == 0);
+// CHECK: i64.const 424342
+assert not (424342 : Nat64 == 1);
+
+// CHECK: i64.const {{1823394005778432|1823395079520256}}
+assert not (424542 : Nat32 == 1);
+assert not (3 : Nat32 == 0);
+assert (0 : Nat32 == 0);
+// CHECK: i64.const {{1824682495967232|1824683569709056}}
+assert not (424842 : Nat32 == 1);
+
+func printBit(a : Bool) { Prim.debugPrint(if a "set" else "clear") };
+
+
+func checkpointAlpha() { assert true; };
+func checkpointBravo() { assert true; };
+func checkpointCharlie() { assert true; };
+func checkpointDelta() { assert true; };
+func checkpointEcho() { assert true; };
+func checkpointFoxtrot() { assert true; };
+func checkpointGolf() { assert true; };
+func checkpointHotel() { assert true; };
+func checkpointIndia() { assert true; };
+func checkpointJuliett() { assert true; };
+
+// Nat64 operations
+do {
+    func printN64ln(w : Nat64) {
+      Prim.debugPrintNat(Prim.nat64ToNat w);
+    };
+    func printI64ln(w : Int64) {
+      Prim.debugPrintInt(Prim.int64ToInt w);
+    };
+
+    let a : Nat64 = 4567;
+    let b : Nat64 = 7;
+    let c : Nat64 = 8912765;
+    let d : Int64 = -15;
+    let e : Nat64 = 20000;
+
+// this is the value of c
+// CHECK: i64.const 142604244
+// CHECK-NOT: call $box64
+// CHECK: call $printN64ln
+    printN64ln(c);
+    printN64ln(^c);
+    printN64ln(a +% c);
+    printN64ln(c -% a);
+
+// CHECK: call $checkpointAlpha
+    checkpointAlpha();
+// This is a native Wasm i64 multiplication, there should be no shift involved!
+// CHECK-NOT: i64.shr_u
+// CHECK: call $printN64ln
+    printN64ln(a * b);
+
+    printN64ln(a / b);
+    printN64ln(c % a);
+    printN64ln(a ** 2);
+
+    printN64ln(a & c);
+    printN64ln(a | c);
+    printN64ln(a ^ c);
+    printN64ln(a << b);
+    printN64ln(a >> b);
+    printI64ln(-5225319197819536385 >> 4); // 0b1011011101111011111011111101111111011111111011111111101111111111L == -5225319197819536385L --> 826339054743125951L
+    printI64ln(d >> 3);
+    printI64ln(-5225319197819536385 >> 4); // 0b1011011101111011111011111101111111011111111011111111101111111111L == -5225319197819536385L --> -326582449863721025L
+
+    printN64ln(c <<> b);
+    printN64ln(c <>> b);
+    printI64ln(Prim.popcntInt64 d); // -15 = 0xfffffffffffffff1 = 0b1111_..._1111_1111_0001 (population = 61)
+    printN64ln(Prim.clzNat64 e); // 20000 = 0x0000000000004e20 (leading zeros = 49)
+    printN64ln(Prim.ctzNat64 e); // 20000 = 0x0000000000004e20 (trailing zeros = 5)
+    printBit(Prim.btstNat64(e, 5 : Nat64)); // 20000 = 0x0000000000004e20 (result = true)
+    printBit(Prim.btstNat64(e, 63 : Nat64)); // 20000 = 0x0000000000004e20 (result = false)
+    printBit(Prim.btstNat64(e, 69 : Nat64)); // 20000 = 0x0000000000004e20 (mod 64, result = true)
+
+    assert (3 : Nat64 ** (4 : Nat64) == (81 : Nat64));
+    assert (3 : Nat64 ** (7 : Nat64) == (2187 : Nat64));
+    assert (3 : Nat64 ** (14 : Nat64) == (4782969 : Nat64));
+    assert (3 : Nat64 ** (20 : Nat64) == (3486784401 : Nat64));
+};
+
+
+
+// Nat32 operations
+do {
+    func printN32ln(w : Nat32) {
+      Prim.debugPrintNat(Prim.nat32ToNat w);
+    };
+    func printI32ln(w : Int32) {
+      Prim.debugPrintInt(Prim.int32ToInt w);
+    };
+
+    let a : Nat32 = 4567;
+    let b : Nat32 = 7;
+    let c : Nat32 = 8912765;
+    let d : Int32 = -15;
+    let e : Nat32 = 20000;
+
+// CHECK: call $checkpointBravo
+    checkpointBravo();
+// this is the value of c
+// CHECK: i64.const 38280035265675264
+// CHECK-NOT: call $box64
+// CHECK: call $printN32ln
+    printN32ln(c);
+    printN32ln(^c);
+    printN32ln(a + c);
+    printN32ln(c - a);
+
+// CHECK: call $checkpointCharlie
+    checkpointCharlie();
+// This is a native Wasm i64 multiplication, there should be no shift involved!
+// CHECK-NOT: i64.shr_u
+// CHECK: call $printN32ln
+    printN32ln(a * b);
+    printN32ln(a / b);
+    printN32ln(c % a);
+    printN32ln(a ** 2);
+
+    printN32ln(a & c);
+    printN32ln(a | c);
+    printN32ln(a ^ c);
+    printN32ln(a << b);
+    printN32ln(a >> b);
+    printI32ln(-1216614433 >> 4); // 0b10110111011110111110111111011111l == -1216614433l --> 192397053l
+    printI32ln(d >> 3);
+    printI32ln(-1216614433 >> 4); // 0b10110111011110111110111111011111l == -1216614433l --> -76038403
+    printN32ln(c <<> b);
+    printN32ln(c <>> b);
+    printI32ln(Prim.popcntInt32 d); // -15 = 0xfffffff1 = 0b1111_1111_1111_1111_1111_1111_1111_0001 (population = 29)
+    printN32ln(Prim.clzNat32 e); // 20000 = 0x00004e20 (leading zeros = 17)
+    printN32ln(Prim.ctzNat32 e); // 20000 = 0x00004e20 (trailing zeros = 5)
+    printBit(Prim.btstNat32(e, 5 : Nat32)); // 20000 = 0x00004e20 (result = true)
+    printBit(Prim.btstNat32(e, 31 : Nat32)); // 20000 = 0x00004e20 (result = false)
+    printBit(Prim.btstNat32(e, 37 : Nat32)); // 20000 = 0x00004e20 (mod 32, result = true)
+
+    assert (3 : Nat32 ** (4 : Nat32) == (81 : Nat32));
+    assert (3 : Nat32 ** (7 : Nat32) == (2187 : Nat32));
+    assert (3 : Nat32 ** (14 : Nat32) == (4782969 : Nat32));
+    assert (3 : Nat32 ** (20 : Nat32) == (3486784401 : Nat32));
+};
+
+// Nat16 operations
+do {
+    func printN16ln(w : Nat16) {
+      Prim.debugPrintNat(Prim.nat16ToNat w);
+    };
+    func printI16ln(w : Int16) {
+      Prim.debugPrintInt(Prim.int16ToInt w);
+    };
+
+    let a : Nat16 = 4567;
+    let b : Nat16 = 7;
+    let c : Nat16 = 55734;
+    let d : Int16 = -15;
+    let e : Nat16 = 20000;
+
+
+    printN16ln(c);
+    printN16ln(^c);
+    printN16ln(a +% c);
+    printN16ln(c -% a);
+
+// CHECK: call $checkpointDelta
+    checkpointDelta();
+// this is the value of a
+// CHECK: i64.const {{1285496218637565952|1285566587381743616}}
+// this is the value of b
+// CHECK: i64.const {{1970324836974592|2040693581152256}}
+// This is not a native Wasm i64 multiplication, we need to shift one of the args left by 16 bits!
+// CHECK: i64.const 48
+// CHECK-NEXT: i64.shr_u
+// CHECK-NEXT: i64.mul
+// CHECK-NEXT: i64.const 70368744177664
+// CHECK-NEXT: i64.or
+// CHECK-NEXT: call $printN16ln
+    printN16ln(a *% b);
+    printN16ln(a / b);
+    printN16ln(c % a);
+    printN16ln(a **% 2);
+
+    printN16ln(a & c);
+    printN16ln(a | c);
+    printN16ln(a ^ c);
+    printN16ln(a << b);
+
+// CHECK: call $checkpointEcho
+   checkpointEcho();
+// this is the value of b
+// CHECK: i64.const {{1970324836974592|2040693581152256}}
+// This is not a native Wasm i32 left shift, we need to shift the second arg left by 16 bits and clamp it to 4 bits!
+// CHECK: i64.const 48
+// CHECK-NEXT: i64.shr_u
+// CHECK-NEXT: i64.const 15
+// CHECK-NEXT: i64.and
+// CHECK-NEXT: i64.shr_u
+// Then the result must be sanitised.
+// CHECK-NEXT: i64.const -281474976710656
+// CHECK-NEXT: i64.and
+// CHECK-NEXT: i64.const 70368744177664
+// CHECK-NEXT: i64.or
+// CHECK-NEXT: call $printN16ln
+    printN16ln(a >> b);
+    printN16ln(Prim.int16ToNat16(d) >> 3); // -15 = 0xfff1 = 0b1111_1111_1111_0001 (shifted = 0b0001_1111_1111_1110 = 8190)
+    printI16ln(d >> 3); // -15 = 0xfff1 = 0b1111_1111_1111_0001 (shifted = 0b1111_1111_1111_1110 = -2)
+
+// CHECK: call $checkpointFoxtrot
+   checkpointFoxtrot();
+// this is the value of b
+// CHECK: i64.const {{1970324836974592|2040693581152256}}
+// CHECK: call $printN16ln
+    printN16ln(c <<> b);
+
+// CHECK: call $checkpointGolf
+   checkpointGolf();
+// this is the value of b
+// CHECK: i64.const {{1970324836974592|2040693581152256}}
+// CHECK: i64.rotr
+    printN16ln(c <>> b);
+    printI16ln(Prim.popcntInt16 d); // -15 = 0xfff1 = 0b1111_1111_1111_0001 (population = 13)
+    printN16ln(Prim.clzNat16 e); // 20000 = 0x4e20 (leading zeros = 1)
+    printN16ln(Prim.ctzNat16 e); // 20000 = 0x4e20 (trailing zeros = 5)
+    printBit(Prim.btstNat16(e, 5 : Nat16)); // 20000 = 0x4e20 (result = true)
+    printBit(Prim.btstNat16(e, 15 : Nat16)); // 20000 = 0x4e20 (result = false)
+    printBit(Prim.btstNat16(e, 21 : Nat16)); // 20000 = 0x4e20 (mod 16, result = true)
+
+
+    assert (3 : Nat16 ** (0 : Nat16) == (1 : Nat16));
+    assert (3 : Nat16 ** (1 : Nat16) == (3 : Nat16));
+    assert (3 : Nat16 ** (4 : Nat16) == (81 : Nat16));
+    assert (3 : Nat16 ** (7 : Nat16) == (2187 : Nat16));
+};
+
+// Nat8 operations
+do {
+    func printN8ln(w : Nat8) {
+      Prim.debugPrintNat(Prim.nat8ToNat w);
+    };
+    func printI8ln(w : Int8) {
+      Prim.debugPrintInt(Prim.int8ToInt w);
+    };
+
+    let a : Nat8 = 67;
+    let b : Nat8 = 7;
+    let c : Nat8 = 34;
+    let d : Int8 = -15;
+    let e : Nat8 = 200;
+
+
+    printN8ln(c);
+    printN8ln(^c);
+    printN8ln(a + c);
+    printN8ln(c -% a);
+// CHECK: call $checkpointHotel
+    checkpointHotel();
+// this is the value of b
+// CHECK: i64.const {{504403158265495552|522417556774977536}}
+// This is not a native Wasm i64 multiplication, we need to shift one of the args left by 56 bits!
+// CHECK: i64.const 56
+// CHECK-NEXT: i64.shr_u
+// CHECK-NEXT: i64.mul
+// CHECK: call $printN8ln
+    printN8ln(a *% b);
+    printN8ln(a / b);
+    printN8ln(c % a);
+    printN8ln(a **% 2);
+
+    printN8ln(a & c);
+    printN8ln(a | c);
+    printN8ln(a ^ c);
+    printN8ln(a << b);
+
+// CHECK: call $checkpointIndia
+    checkpointIndia();
+// this is the value of b
+// CHECK: i64.const {{504403158265495552|522417556774977536}}
+// This is not a native Wasm i64 left shift, we need to shift the second arg left by 56 bits and clamp it to 3 bits!
+// CHECK: i64.const 56
+// CHECK-NEXT: i64.shr_u
+// CHECK-NEXT: i64.const 7
+// CHECK-NEXT: i64.and
+// CHECK-NEXT: i64.shr_u
+// Then the result must be sanitised.
+// CHECK-NEXT: i64.const -72057594037927936
+// CHECK-NEXT: i64.and
+// CHECK-NEXT: i64.const 18014398509481984
+// CHECK-NEXT: i64.or
+// CHECK-NEXT: call $printN8ln
+    printN8ln(a >> b);
+    printN8ln(Prim.int8ToNat8(d) >> 3); // -15 = 0xf1 = 0b1111_0001 (shifted = 0b0001_1110 = 30)
+    printI8ln(d >> 3); // -15 = 0xf1 = 0b1111_0001 (shifted = 0b1111_1110 = -2)
+
+// CHECK: call $checkpointJuliett
+    checkpointJuliett();
+// this is the value of b
+// CHECK: i64.const {{504403158265495552|522417556774977536}}
+// CHECK: call $printN8ln
+    printN8ln(c <<> b);
+// this is the value of b
+// CHECK: i64.const {{504403158265495552|522417556774977536}}
+// CHECK: call $printN8ln
+    printN8ln(c <>> b);
+    printI8ln(Prim.popcntInt8 d); // -15 = 0xf1 = 0b1111_0001 (population = 5)
+    printN8ln(Prim.clzNat8 e); // 200 = 0xC8 (leading zeros = 0)
+    printN8ln(Prim.ctzNat8 e); // 200 = 0xC8 (trailing zeros = 3)
+    printBit(Prim.btstNat8(e, 3 : Nat8)); // 200 = 0xC8 (result = true)
+    printBit(Prim.btstNat8(e, 5 : Nat8)); // 200 = 0xC8 (result = false)
+    printBit(Prim.btstNat8(e, 11 : Nat8)); // 200 = 0xC8 (mod 8, result = true)
+
+    assert (3 : Nat8 ** (0 : Nat8) == (1 : Nat8));
+    assert (3 : Nat8 ** (3 : Nat8) == (27 : Nat8));
+    assert (3 : Nat8 ** (4 : Nat8) == (81 : Nat8));
+    assert (3 : Nat8 ** (5 : Nat8) == (243 : Nat8));
+};
+
+
+// check whether patterns work
+
+func w8 (n : Nat8) = assert (switch n { case 0 false; case 1 false; case 42 true; case _ false });
+func w16 (n : Nat16) = assert (switch n { case 0 false; case 1 false; case 65000 true; case _ false });
+func w32 (n : Nat32) = assert (switch n { case 0 false; case 1 false; case 4_294_967_295 true; case _ false });
+func w64 (n : Nat64) = assert (switch n { case 0 false; case 1 false; case 42 true; case _ false });
+
+
+w8 42;
+w16 65000;
+w32 4_294_967_295;
+w64 42;
diff --git a/test/trap/divInt32.mo b/test/trap/divInt32-classical.mo
similarity index 72%
rename from test/trap/divInt32.mo
rename to test/trap/divInt32-classical.mo
index e0eb97d7590..b55c0c177f2 100644
--- a/test/trap/divInt32.mo
+++ b/test/trap/divInt32-classical.mo
@@ -1,2 +1,3 @@
+//CLASSICAL-PERSISTENCE-ONLY
 import Prim "mo:⛔";
 let _ = Prim.intToInt32 (-0x80000000) / (-1 : Int32)
diff --git a/test/trap/divInt32-enhanced.mo b/test/trap/divInt32-enhanced.mo
new file mode 100644
index 00000000000..5a2a62c261a
--- /dev/null
+++ b/test/trap/divInt32-enhanced.mo
@@ -0,0 +1,3 @@
+//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY
+import Prim "mo:⛔";
+let _ = Prim.intToInt32 (-0x80000000) / (-1 : Int32)
diff --git a/test/trap/mulInt8-upper.mo b/test/trap/mulInt8-upper.mo
index e9173faf73e..c536889dd3a 100644
--- a/test/trap/mulInt8-upper.mo
+++ b/test/trap/mulInt8-upper.mo
@@ -1,6 +1,5 @@
 let _ = (16 : Int8) * (8 : Int8)
 // There should be only one shift per operand
 // CHECK: mul
-// CHECK: i32.shr_s
-// CHECK: i32.const 24
-// CHECK-NEXT: i32.shr_s
+// CHECK: {{i32.const 24|i64.const 48}}
+// CHECK-NEXT: {{i32|i64}}.shr_s
diff --git a/test/trap/mulNat8.mo b/test/trap/mulNat8.mo
index f3569e6fb5e..64c7ba8266c 100644
--- a/test/trap/mulNat8.mo
+++ b/test/trap/mulNat8.mo
@@ -1,6 +1,5 @@
 let _ = (16 : Nat8) * (16 : Nat8)
 // There should be only one shift per operand
 // CHECK: (func $mul
-// CHECK: i32.shr_u
-// CHECK: i32.const 24
-// CHECK-NEXT: i32.shr_u
+// CHECK: {{i32.const 24|i64.const 48}}
+// CHECK-NEXT: {{i32|i64}}.shr_u
diff --git a/test/trap/ok/divInt32-classical.run-ir.ok b/test/trap/ok/divInt32-classical.run-ir.ok
new file mode 100644
index 00000000000..2e173c5cdba
--- /dev/null
+++ b/test/trap/ok/divInt32-classical.run-ir.ok
@@ -0,0 +1 @@
+divInt32-classical.mo:3.9-3.53: execution error, arithmetic overflow
diff --git a/test/trap/ok/divInt32-classical.run-low.ok b/test/trap/ok/divInt32-classical.run-low.ok
new file mode 100644
index 00000000000..2e173c5cdba
--- /dev/null
+++ b/test/trap/ok/divInt32-classical.run-low.ok
@@ -0,0 +1 @@
+divInt32-classical.mo:3.9-3.53: execution error, arithmetic overflow
diff --git a/test/trap/ok/divInt32-classical.run.ok b/test/trap/ok/divInt32-classical.run.ok
new file mode 100644
index 00000000000..2e173c5cdba
--- /dev/null
+++ b/test/trap/ok/divInt32-classical.run.ok
@@ -0,0 +1 @@
+divInt32-classical.mo:3.9-3.53: execution error, arithmetic overflow
diff --git a/test/trap/ok/divInt32.run.ret.ok b/test/trap/ok/divInt32-classical.run.ret.ok
similarity index 100%
rename from test/trap/ok/divInt32.run.ret.ok
rename to test/trap/ok/divInt32-classical.run.ret.ok
diff --git a/test/trap/ok/divInt32.wasm-run.ok b/test/trap/ok/divInt32-classical.wasm-run.ok
similarity index 72%
rename from test/trap/ok/divInt32.wasm-run.ok
rename to test/trap/ok/divInt32-classical.wasm-run.ok
index 56abb509ee0..6a787db9429 100644
--- a/test/trap/ok/divInt32.wasm-run.ok
+++ b/test/trap/ok/divInt32-classical.wasm-run.ok
@@ -1,4 +1,4 @@
-Error: failed to run main module `_out/divInt32.wasm`
+Error: failed to run main module `_out/divInt32-classical.wasm`
 
 Caused by:
     0: failed to invoke command default
diff --git a/test/run/ok/inc-oom.wasm-run.ret.ok b/test/trap/ok/divInt32-classical.wasm-run.ret.ok
similarity index 100%
rename from test/run/ok/inc-oom.wasm-run.ret.ok
rename to test/trap/ok/divInt32-classical.wasm-run.ret.ok
diff --git a/test/trap/ok/divInt32-enhanced.run-ir.ok b/test/trap/ok/divInt32-enhanced.run-ir.ok
new file mode 100644
index 00000000000..564c44d9f0c
--- /dev/null
+++ b/test/trap/ok/divInt32-enhanced.run-ir.ok
@@ -0,0 +1 @@
+divInt32-enhanced.mo:3.9-3.53: execution error, arithmetic overflow
diff --git a/test/trap/ok/divInt32-enhanced.run-low.ok b/test/trap/ok/divInt32-enhanced.run-low.ok
new file mode 100644
index 00000000000..564c44d9f0c
--- /dev/null
+++ b/test/trap/ok/divInt32-enhanced.run-low.ok
@@ -0,0 +1 @@
+divInt32-enhanced.mo:3.9-3.53: execution error, arithmetic overflow
diff --git a/test/trap/ok/divInt32-enhanced.run.ok b/test/trap/ok/divInt32-enhanced.run.ok
new file mode 100644
index 00000000000..564c44d9f0c
--- /dev/null
+++ b/test/trap/ok/divInt32-enhanced.run.ok
@@ -0,0 +1 @@
+divInt32-enhanced.mo:3.9-3.53: execution error, arithmetic overflow
diff --git a/test/trap/ok/divInt32-enhanced.run.ret.ok b/test/trap/ok/divInt32-enhanced.run.ret.ok
new file mode 100644
index 00000000000..69becfa16f9
--- /dev/null
+++ b/test/trap/ok/divInt32-enhanced.run.ret.ok
@@ -0,0 +1 @@
+Return code 1
diff --git a/test/trap/ok/divInt32-enhanced.wasm-run.ok b/test/trap/ok/divInt32-enhanced.wasm-run.ok
new file mode 100644
index 00000000000..42bff533a92
--- /dev/null
+++ b/test/trap/ok/divInt32-enhanced.wasm-run.ok
@@ -0,0 +1,9 @@
+Error: failed to run main module `_out/divInt32-enhanced.wasm`
+
+Caused by:
+    0: failed to invoke command default
+    1: error while executing at wasm backtrace:
+         0: div
+         1: init
+         2: _start
+    2: wasm trap: unreachable
diff --git a/test/trap/ok/divInt32.wasm-run.ret.ok b/test/trap/ok/divInt32-enhanced.wasm-run.ret.ok
similarity index 100%
rename from test/trap/ok/divInt32.wasm-run.ret.ok
rename to test/trap/ok/divInt32-enhanced.wasm-run.ret.ok
diff --git a/test/trap/ok/divInt32.run-ir.ok b/test/trap/ok/divInt32.run-ir.ok
deleted file mode 100644
index a22bc719adc..00000000000
--- a/test/trap/ok/divInt32.run-ir.ok
+++ /dev/null
@@ -1 +0,0 @@
-divInt32.mo:2.9-2.53: execution error, arithmetic overflow
diff --git a/test/trap/ok/divInt32.run-low.ok b/test/trap/ok/divInt32.run-low.ok
deleted file mode 100644
index a22bc719adc..00000000000
--- a/test/trap/ok/divInt32.run-low.ok
+++ /dev/null
@@ -1 +0,0 @@
-divInt32.mo:2.9-2.53: execution error, arithmetic overflow
diff --git a/test/trap/ok/divInt32.run.ok b/test/trap/ok/divInt32.run.ok
deleted file mode 100644
index a22bc719adc..00000000000
--- a/test/trap/ok/divInt32.run.ok
+++ /dev/null
@@ -1 +0,0 @@
-divInt32.mo:2.9-2.53: execution error, arithmetic overflow