diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 7f9ab1a35..efd19d36b 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -33,7 +33,8 @@ jobs: echo "git_target_branch=$(echo ${GITHUB_BASE_REF})" >> $GITHUB_OUTPUT echo "git_ref=${{ github.event.pull_request.head.sha }}" >> $GITHUB_OUTPUT echo "coverage_dir=tanssi-coverage/pulls/${{ github.event.pull_request.number }}" >> $GITHUB_OUTPUT - echo "coverage_report=true" >> $GITHUB_OUTPUT + # echo "coverage_report=true" >> $GITHUB_OUTPUT + echo "coverage_report=false" >> $GITHUB_OUTPUT else echo "git_branch=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_OUTPUT echo "git_target_branch=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_OUTPUT @@ -157,22 +158,22 @@ jobs: with: name: coverage path: coverage - - name: Upload coverage s3 - if: ${{(github.event.pull_request.head.repo.full_name == github.repository || github.event_name == 'push') }} - uses: mario-sangar/upload-s3-action@master - id: S3 - with: - aws_key_id: ${{ secrets.S3_COVERAGE_ID }} - aws_secret_access_key: ${{ secrets.S3_COVERAGE_KEY }} - aws_bucket: ${{ vars.S3_COVERAGE_BUCKET }} - destination_dir: "${{ needs.set-tags.outputs.coverage_dir }}" - source_dir: "coverage" - acl: "none" - - name: Link To Report - if: ${{(github.event.pull_request.head.repo.full_name == github.repository || github.event_name == 'push') }} - run: | - echo "${{steps.S3.outputs.object_key}}" - echo "${{ vars.S3_BUCKET_URL }}/${{steps.S3.outputs.object_key}}/html/index.html" + # - name: Upload coverage s3 + # if: ${{(github.event.pull_request.head.repo.full_name == github.repository || github.event_name == 'push') }} + # uses: mario-sangar/upload-s3-action@master + # id: S3 + # with: + # aws_key_id: ${{ secrets.S3_COVERAGE_ID }} + # aws_secret_access_key: ${{ secrets.S3_COVERAGE_KEY }} + # aws_bucket: ${{ vars.S3_COVERAGE_BUCKET }} + # destination_dir: "${{ needs.set-tags.outputs.coverage_dir }}" + # source_dir: "coverage" + # acl: "none" + # - name: Link To Report + # if: ${{(github.event.pull_request.head.repo.full_name == github.repository || github.event_name == 'push') }} + # run: | + # echo "${{steps.S3.outputs.object_key}}" + # echo "${{ vars.S3_BUCKET_URL }}/${{steps.S3.outputs.object_key}}/html/index.html" - name: Create coverage report comment if: ${{ (needs.set-tags.outputs.coverage_report == 'true') && (github.event.pull_request.head.repo.full_name == github.repository || github.event_name == 'push') }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 7ee29a61c..e963101ef 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -50,7 +50,7 @@ jobs: echo "sha=$(git log -1 --format='%H')" >> $GITHUB_OUTPUT echo "sha8=$(git log -1 --format='%H' | cut -c1-8)" >> $GITHUB_OUTPUT - ENDPOINT="https://api.github.com/repos/moondance-labs/tanssi/git/refs/tags/${{ steps.get-latest-rt.outputs.latest_rt }}" + ENDPOINT="https://api.github.com/repos/AvaProtocol/tanssi-integration/git/refs/tags/${{ steps.get-latest-rt.outputs.latest_rt }}" RESPONSE=$(curl -s -H "Accept: application/vnd.github.v3+json" $ENDPOINT) TYPE=$(echo $RESPONSE | jq -r '.object.type') @@ -72,7 +72,7 @@ jobs: id: check-docker-image run: | TAG=sha-${{ steps.get-sha.outputs.sha8 }} - echo "image_exists=$(docker image inspect moondancelabs/tanssi:$TAG > /dev/null && echo "true" || echo "false")" >> $GITHUB_OUTPUT + echo "image_exists=$(docker image inspect avaprotocol/tanssi:$TAG > /dev/null && echo "true" || echo "false")" >> 
$GITHUB_OUTPUT - name: Display variables run: | @@ -574,9 +574,9 @@ jobs: fail-fast: false matrix: chains: [ - { chain: "stagenet_dancebox", runtime: "dancebox" }, - { chain: "flashbox", runtime: "flashbox" }, - { chain: "dancebox", runtime: "dancebox" }, + # { chain: "stagenet_dancebox", runtime: "dancebox" }, + # { chain: "flashbox", runtime: "flashbox" }, + # { chain: "dancebox", runtime: "dancebox" }, { chain: "frontier_template", runtime: "container-chain-template-frontier" }, ] env: @@ -609,85 +609,85 @@ jobs: pnpm install pnpm moonwall test chopsticks_${{ matrix.chains.chain }}_upgrade - zombienet-test-upgrade: - runs-on: self-hosted - needs: ["set-tags", "build"] - strategy: - fail-fast: false - matrix: - chain: ["dancebox"] - steps: - - name: Checkout - uses: actions/checkout@v4 - with: - ref: ${{ needs.set-tags.outputs.git_ref }} - - - name: Pnpm - uses: pnpm/action-setup@v3.0.0 - with: - version: 8 - - - name: Setup node - uses: actions/setup-node@v4 - with: - node-version: 20.x - cache: "pnpm" - - - name: Create local folders - run: | - mkdir -p target/release/wbuild/${{ matrix.chain }}-runtime/ - mkdir -p test/tmp - - name: "Download binaries" - uses: actions/download-artifact@v4 - with: - name: binaries - path: target/release - - - name: "Download branch built runtime" - uses: actions/download-artifact@v4 - with: - name: runtimes - path: target/release/wbuild/${{ matrix.chain }}-runtime/ - - - name: Retrieve tanssi binary from docker (for plainSpec generation) - run: | - TANSSI_COMMIT=${{ needs.set-tags.outputs.latest_rt_sha8 }} - DOCKER_TAG="moondancelabs/tanssi:sha-$TANSSI_COMMIT" - - docker rm -f tanssi_container 2> /dev/null | true - docker create --name tanssi_container $DOCKER_TAG bash - docker cp tanssi_container:tanssi/tanssi-node test/tmp/tanssi_rt - docker rm -f tanssi_container - - - name: "Run zombie upgrade test" - run: | - chmod uog+x target/release/tanssi-node - - cd test - - pnpm install - - chmod uog+x tmp/tanssi_rt - tmp/tanssi_rt build-spec --chain ${{ matrix.chain }}-local > tmp/${{ matrix.chain }}-plain-spec.json - pnpm tsx scripts/modify-plain-specs.ts process tmp/${{ matrix.chain }}-plain-spec.json tmp/${{ matrix.chain }}-modified-spec.json - tmp/tanssi_rt build-spec --chain tmp/${{ matrix.chain }}-modified-spec.json --raw > tmp/${{ matrix.chain }}-raw-spec.json - - ## Run tests - - pnpm moonwall test zombie_${{ matrix.chain }}_upgrade - - name: Zip and Upload Node Logs on Failure - if: failure() - run: | - TIMESTAMP=$(date +%Y%m%d%H%M%S) - export NODE_LOGS_ZIP="node_logs_$TIMESTAMP.zip" - MOST_RECENT_ZOMBIE_DIR=$(ls -td /tmp/zombie-* | head -n 1) - find $MOST_RECENT_ZOMBIE_DIR -maxdepth 1 -type f -name '*.log' -exec zip -r $NODE_LOGS_ZIP {} \; - echo "NODE_LOGS_ZIP=${NODE_LOGS_ZIP}" >> $GITHUB_ENV - - uses: actions/upload-artifact@v4 - if: failure() - with: - name: failed-node-logs - path: ${{ env.NODE_LOGS_ZIP }} + # zombienet-test-upgrade: + # runs-on: self-hosted + # needs: ["set-tags", "build"] + # strategy: + # fail-fast: false + # matrix: + # chain: ["dancebox"] + # steps: + # - name: Checkout + # uses: actions/checkout@v4 + # with: + # ref: ${{ needs.set-tags.outputs.git_ref }} + + # - name: Pnpm + # uses: pnpm/action-setup@v3.0.0 + # with: + # version: 8 + + # - name: Setup node + # uses: actions/setup-node@v4 + # with: + # node-version: 20.x + # cache: "pnpm" + + # - name: Create local folders + # run: | + # mkdir -p target/release/wbuild/${{ matrix.chain }}-runtime/ + # mkdir -p test/tmp + # - name: "Download binaries" + # uses: 
actions/download-artifact@v4 + # with: + # name: binaries + # path: target/release + + # - name: "Download branch built runtime" + # uses: actions/download-artifact@v4 + # with: + # name: runtimes + # path: target/release/wbuild/${{ matrix.chain }}-runtime/ + + # - name: Retrieve tanssi binary from docker (for plainSpec generation) + # run: | + # TANSSI_COMMIT=${{ needs.set-tags.outputs.latest_rt_sha8 }} + # DOCKER_TAG="avaprotocol/tanssi:sha-$TANSSI_COMMIT" + + # docker rm -f tanssi_container 2> /dev/null | true + # docker create --name tanssi_container $DOCKER_TAG bash + # docker cp tanssi_container:tanssi/tanssi-node test/tmp/tanssi_rt + # docker rm -f tanssi_container + + # - name: "Run zombie upgrade test" + # run: | + # chmod uog+x target/release/tanssi-node + + # cd test + + # pnpm install + + # chmod uog+x tmp/tanssi_rt + # tmp/tanssi_rt build-spec --chain ${{ matrix.chain }}-local > tmp/${{ matrix.chain }}-plain-spec.json + # pnpm tsx scripts/modify-plain-specs.ts process tmp/${{ matrix.chain }}-plain-spec.json tmp/${{ matrix.chain }}-modified-spec.json + # tmp/tanssi_rt build-spec --chain tmp/${{ matrix.chain }}-modified-spec.json --raw > tmp/${{ matrix.chain }}-raw-spec.json + + # ## Run tests + + # pnpm moonwall test zombie_${{ matrix.chain }}_upgrade + # - name: Zip and Upload Node Logs on Failure + # if: failure() + # run: | + # TIMESTAMP=$(date +%Y%m%d%H%M%S) + # export NODE_LOGS_ZIP="node_logs_$TIMESTAMP.zip" + # MOST_RECENT_ZOMBIE_DIR=$(ls -td /tmp/zombie-* | head -n 1) + # find $MOST_RECENT_ZOMBIE_DIR -maxdepth 1 -type f -name '*.log' -exec zip -r $NODE_LOGS_ZIP {} \; + # echo "NODE_LOGS_ZIP=${NODE_LOGS_ZIP}" >> $GITHUB_ENV + # - uses: actions/upload-artifact@v4 + # if: failure() + # with: + # name: failed-node-logs + # path: ${{ env.NODE_LOGS_ZIP }} zombienet-test-upgrade-containers: runs-on: self-hosted @@ -696,7 +696,7 @@ jobs: fail-fast: false matrix: chains: [ - { chain: "frontier_template", runtime: "container-chain-template-frontier" }, + # { chain: "frontier_template", runtime: "container-chain-template-frontier" }, { chain: "simple_template", runtime: "container-chain-template-simple" }, ] steps: @@ -757,7 +757,7 @@ jobs: - name: Prepare id: prep run: | - DOCKER_IMAGE=moondancelabs/${{matrix.image}} + DOCKER_IMAGE=avaprotocol/${{matrix.image}} TAGS="${DOCKER_IMAGE}:sha-${{ needs.set-tags.outputs.sha8 }}" echo "tags=${TAGS}" >> $GITHUB_OUTPUT echo "created=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT diff --git a/Cargo.lock b/Cargo.lock index b39894cb5..d5718cd1a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -740,6 +740,25 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +[[package]] +name = "ava-protocol-primitives" +version = "1.0.0" +dependencies = [ + "frame-support", + "impl-trait-for-tuples", + "orml-traits", + "parity-scale-codec", + "scale-info", + "sp-consensus-aura", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", +] + [[package]] name = "backtrace" version = "0.3.69" @@ -1680,6 +1699,23 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2382f75942f4b3be3690fe4f86365e9c853c1587d6ee58212cebf6e2a9ccd101" +[[package]] +name = "common-runtime" +version = "0.1.0" +dependencies = [ + "ava-protocol-primitives", + "frame-support", + "frame-system", + "orml-tokens", + "orml-traits", + "pallet-balances", + 
"pallet-transaction-payment", + "pallet-treasury", + "polkadot-primitives", + "sp-runtime", + "sp-std", +] + [[package]] name = "concurrent-queue" version = "2.4.0" @@ -1843,8 +1879,10 @@ version = "0.8.0" dependencies = [ "async-io 1.13.0", "async-trait", + "ava-protocol-primitives", "ccp-authorities-noting-inherent", "clap 4.5.4", + "common-runtime", "container-chain-template-simple-runtime", "cumulus-client-cli", "cumulus-client-consensus-aura", @@ -1868,6 +1906,7 @@ dependencies = [ "nimbus-consensus", "nimbus-primitives", "node-common", + "orml-asset-registry", "parity-scale-codec", "polkadot-cli", "polkadot-parachain-primitives", @@ -1908,6 +1947,7 @@ dependencies = [ "sp-session", "sp-timestamp", "sp-transaction-pool", + "staging-xcm", "substrate-build-script-utils", "substrate-frame-rpc-system", "substrate-prometheus-endpoint", @@ -2026,6 +2066,8 @@ name = "container-chain-template-simple-runtime" version = "0.1.0" dependencies = [ "async-backing-primitives", + "ava-protocol-primitives", + "common-runtime", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-session-benchmarking", @@ -2048,14 +2090,23 @@ dependencies = [ "hex-literal 0.3.4", "log", "nimbus-primitives", + "orml-asset-registry", + "orml-currencies", + "orml-tokens", + "orml-traits", + "orml-xtokens", "pallet-asset-rate", "pallet-assets", "pallet-async-backing", "pallet-author-inherent", + "pallet-automation-price", + "pallet-automation-time", "pallet-balances", "pallet-cc-authorities-noting", + "pallet-collective", "pallet-foreign-asset-creator", "pallet-maintenance-mode", + "pallet-membership", "pallet-message-queue", "pallet-migrations 0.1.0", "pallet-multisig", @@ -2071,6 +2122,7 @@ dependencies = [ "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-executor-utils", + "pallet-xcmp-handler", "parachains-common", "parity-scale-codec", "polkadot-parachain-primitives", @@ -8048,6 +8100,132 @@ dependencies = [ "num-traits", ] +[[package]] +name = "orml-asset-registry" +version = "0.10.0" +source = "git+https://github.com/AstarNetwork/open-runtime-module-library?branch=polkadot-v1.11.0#d3629f99bd7e679bf217a247966df9fcf17fa55d" +dependencies = [ + "frame-support", + "frame-system", + "log", + "orml-traits", + "pallet-xcm", + "parity-scale-codec", + "polkadot-runtime-common", + "scale-info", + "serde", + "sp-io", + "sp-runtime", + "sp-std", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", +] + +[[package]] +name = "orml-currencies" +version = "0.10.0" +source = "git+https://github.com/AstarNetwork/open-runtime-module-library?branch=polkadot-v1.11.0#d3629f99bd7e679bf217a247966df9fcf17fa55d" +dependencies = [ + "frame-support", + "frame-system", + "orml-traits", + "orml-utilities", + "parity-scale-codec", + "scale-info", + "serde", + "sp-io", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "orml-tokens" +version = "0.10.0" +source = "git+https://github.com/AstarNetwork/open-runtime-module-library?branch=polkadot-v1.11.0#d3629f99bd7e679bf217a247966df9fcf17fa55d" +dependencies = [ + "frame-support", + "frame-system", + "log", + "orml-traits", + "parity-scale-codec", + "scale-info", + "serde", + "sp-arithmetic", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "orml-traits" +version = "0.10.0" +source = "git+https://github.com/AstarNetwork/open-runtime-module-library?branch=polkadot-v1.11.0#d3629f99bd7e679bf217a247966df9fcf17fa55d" +dependencies = [ + "frame-support", + "impl-trait-for-tuples", + "num-traits", + "orml-utilities", + "parity-scale-codec", + "paste", + 
"scale-info", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", + "staging-xcm", +] + +[[package]] +name = "orml-utilities" +version = "0.10.0" +source = "git+https://github.com/AstarNetwork/open-runtime-module-library?branch=polkadot-v1.11.0#d3629f99bd7e679bf217a247966df9fcf17fa55d" +dependencies = [ + "frame-support", + "parity-scale-codec", + "scale-info", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "orml-xcm-support" +version = "0.10.0" +source = "git+https://github.com/AstarNetwork/open-runtime-module-library?branch=polkadot-v1.11.0#d3629f99bd7e679bf217a247966df9fcf17fa55d" +dependencies = [ + "frame-support", + "orml-traits", + "parity-scale-codec", + "sp-runtime", + "sp-std", + "staging-xcm", + "staging-xcm-executor", +] + +[[package]] +name = "orml-xtokens" +version = "0.10.0" +source = "git+https://github.com/AstarNetwork/open-runtime-module-library?branch=polkadot-v1.11.0#d3629f99bd7e679bf217a247966df9fcf17fa55d" +dependencies = [ + "frame-support", + "frame-system", + "log", + "orml-traits", + "orml-xcm-support", + "pallet-xcm", + "parity-scale-codec", + "scale-info", + "serde", + "sp-io", + "sp-runtime", + "sp-std", + "staging-xcm", + "staging-xcm-executor", +] + [[package]] name = "os_str_bytes" version = "6.6.1" @@ -8278,6 +8456,73 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-automation-price" +version = "0.1.0" +dependencies = [ + "ava-protocol-primitives", + "cumulus-pallet-xcm", + "cumulus-primitives-core", + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "orml-currencies", + "orml-tokens", + "orml-traits", + "pallet-balances", + "pallet-timestamp", + "pallet-xcm", + "pallet-xcmp-handler", + "parity-scale-codec", + "polkadot-parachain-primitives", + "rand 0.8.5", + "scale-info", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", +] + +[[package]] +name = "pallet-automation-time" +version = "1.0.0" +dependencies = [ + "ava-protocol-primitives", + "cumulus-pallet-xcm", + "cumulus-primitives-core", + "frame-benchmarking", + "frame-support", + "frame-system", + "hex", + "log", + "orml-currencies", + "orml-tokens", + "orml-traits", + "pallet-balances", + "pallet-timestamp", + "pallet-xcm", + "pallet-xcmp-handler", + "parity-scale-codec", + "polkadot-parachain-primitives", + "rand 0.8.5", + "scale-info", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", +] + [[package]] name = "pallet-babe" version = "28.0.0" @@ -10133,6 +10378,36 @@ dependencies = [ "staging-xcm", ] +[[package]] +name = "pallet-xcmp-handler" +version = "0.1.0" +dependencies = [ + "ava-protocol-primitives", + "cumulus-pallet-xcm", + "cumulus-primitives-core", + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "orml-currencies", + "orml-tokens", + "orml-traits", + "pallet-balances", + "pallet-xcm", + "parity-scale-codec", + "polkadot-parachain-primitives", + "scale-info", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", +] + [[package]] name = "parachains-common" version = "7.0.0" diff --git a/Cargo.toml b/Cargo.toml index 211ae0f99..a3f0b3d63 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,6 @@ [workspace] members = [ + "ava-protocol-primitives", "client/*", 
"container-chains/nodes/*", "container-chains/runtime-templates/*", @@ -488,6 +489,318 @@ polkadot-node-core-pvf-prepare-worker = { git = "https://github.com/moondance-l substrate-rpc-client = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0", default-features = false } +# ORML +orml-asset-registry = { git = "https://github.com/AstarNetwork/open-runtime-module-library", branch = "polkadot-v1.11.0", default-features = false } +orml-currencies = { git = "https://github.com/AstarNetwork/open-runtime-module-library", branch = "polkadot-v1.11.0", default-features = false } +orml-tokens = { git = "https://github.com/AstarNetwork/open-runtime-module-library", branch = "polkadot-v1.11.0", default-features = false } +orml-traits = { git = "https://github.com/AstarNetwork/open-runtime-module-library", branch = "polkadot-v1.11.0", default-features = false } +orml-xtokens = { git = "https://github.com/AstarNetwork/open-runtime-module-library", branch = "polkadot-v1.11.0", default-features = false } + +# Ava Protocol +ava-protocol-primitives = { path = "ava-protocol-primitives", default-features = false } +common-runtime = { path = "container-chains/runtime-templates/common", default-features = false } +pallet-automation-price = { path = "pallets/automation-price", default-features = false } +pallet-automation-time = { path = "pallets/automation-time", default-features = false } +pallet-xcmp-handler = { path = "pallets/xcmp-handler", default-features = false } + +[patch."https://github.com/paritytech/polkadot-sdk"] +asset-test-utils = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +assets-common = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +binary-merkle-tree = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +bp-header-chain = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +bp-messages = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +bp-parachains = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +bp-polkadot-core = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +bp-relayers = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +bp-runtime = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +bp-test-utils = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +bp-xcm-bridge-hub = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +bp-xcm-bridge-hub-router = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +bridge-runtime-common = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +cumulus-client-cli = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +cumulus-client-collator = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +cumulus-client-consensus-aura = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +cumulus-client-consensus-common = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } 
+cumulus-client-consensus-proposer = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +cumulus-client-network = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +cumulus-client-parachain-inherent = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +cumulus-client-pov-recovery = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +cumulus-client-service = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +cumulus-pallet-dmp-queue = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +cumulus-pallet-parachain-system = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +cumulus-pallet-parachain-system-proc-macro = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +cumulus-pallet-session-benchmarking = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +cumulus-pallet-xcm = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +cumulus-pallet-xcmp-queue = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +cumulus-primitives-aura = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +cumulus-primitives-core = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +cumulus-primitives-parachain-inherent = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +cumulus-primitives-proof-size-hostfunction = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +cumulus-primitives-timestamp = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +cumulus-primitives-utility = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +cumulus-relay-chain-inprocess-interface = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +cumulus-relay-chain-interface = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +cumulus-relay-chain-minimal-node = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +cumulus-relay-chain-rpc-interface = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +cumulus-test-relay-sproof-builder = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +emulated-integration-tests-common = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +fork-tree = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +frame-benchmarking = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +frame-benchmarking-cli = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +frame-election-provider-solution-type = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +frame-election-provider-support = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = 
"tanssi-polkadot-v1.11.0" } +frame-executive = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +frame-support = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +frame-support-procedural = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +frame-support-procedural-tools = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +frame-support-procedural-tools-derive = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +frame-system = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +frame-system-benchmarking = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +frame-system-rpc-runtime-api = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +frame-try-runtime = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +mmr-gadget = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +mmr-rpc = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-asset-conversion = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-asset-rate = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-asset-tx-payment = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-assets = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-authority-discovery = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-authorship = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-babe = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-bags-list = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-balances = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-beefy = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-beefy-mmr = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-bounties = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-bridge-grandpa = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-bridge-messages = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-bridge-parachains = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-bridge-relayers = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-broker = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-child-bounties = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-collator-selection = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = 
"tanssi-polkadot-v1.11.0" } +pallet-collective = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-conviction-voting = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-democracy = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-election-provider-multi-phase = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-election-provider-support-benchmarking = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-elections-phragmen = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-fast-unstake = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-grandpa = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-identity = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-im-online = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-indices = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-membership = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-message-queue = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-mmr = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-multisig = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-nis = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-nomination-pools = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-nomination-pools-benchmarking = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-nomination-pools-runtime-api = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-offences = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-offences-benchmarking = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-parameters = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-preimage = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-proxy = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-ranked-collective = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-recovery = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-referenda = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-root-testing = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-scheduler = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-session = { git = 
"https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-session-benchmarking = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-society = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-staking = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-staking-reward-curve = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-staking-reward-fn = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-staking-runtime-api = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-state-trie-migration = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-sudo = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-timestamp = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-tips = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-transaction-payment = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-transaction-payment-rpc = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-treasury = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-tx-pause = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-utility = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-vesting = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-whitelist = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-xcm = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-xcm-benchmarks = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +pallet-xcm-bridge-hub-router = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +parachains-common = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +parachains-runtimes-test-utils = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-approval-distribution = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-availability-bitfield-distribution = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-availability-distribution = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-availability-recovery = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-cli = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-collator-protocol = { git = 
"https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-core-primitives = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-dispute-distribution = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-erasure-coding = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-gossip-support = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-network-bridge = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-node-collation-generation = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-node-core-approval-voting = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-node-core-av-store = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-node-core-backing = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-node-core-bitfield-signing = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-node-core-candidate-validation = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-node-core-chain-api = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-node-core-chain-selection = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-node-core-dispute-coordinator = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-node-core-parachains-inherent = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-node-core-prospective-parachains = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-node-core-provisioner = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-node-core-pvf = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-node-core-pvf-checker = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-node-core-pvf-common = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-node-core-runtime-api = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-node-jaeger = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-node-metrics = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-node-network-protocol = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-node-primitives = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-node-subsystem = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-node-subsystem-types = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } 
+polkadot-node-subsystem-util = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-overseer = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-parachain-primitives = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-primitives = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-rpc = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-runtime-common = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-runtime-metrics = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-runtime-parachains = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-service = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-statement-distribution = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +polkadot-statement-table = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +rococo-runtime = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +rococo-runtime-constants = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-allocator = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-authority-discovery = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-basic-authorship = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-block-builder = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-chain-spec = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-chain-spec-derive = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-cli = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-client-api = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-client-db = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-consensus = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-consensus-aura = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-consensus-babe = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-consensus-babe-rpc = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-consensus-beefy = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-consensus-beefy-rpc = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-consensus-epochs = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-consensus-grandpa = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } 
+sc-consensus-grandpa-rpc = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-consensus-manual-seal = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-consensus-slots = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-executor = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-executor-common = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-executor-polkavm = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-executor-wasmtime = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-informant = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-keystore = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-mixnet = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-network = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-network-common = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-network-gossip = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-network-light = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-network-sync = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-network-test = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-network-transactions = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-network-types = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-offchain = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-proposer-metrics = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-rpc = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-rpc-api = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-rpc-server = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-rpc-spec-v2 = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-service = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-state-db = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-storage-monitor = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-sync-state-rpc = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-sysinfo = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-telemetry = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-tracing = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-tracing-proc-macro = { 
git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-transaction-pool = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-transaction-pool-api = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sc-utils = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +slot-range-helper = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-api = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-api-proc-macro = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-application-crypto = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-arithmetic = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-authority-discovery = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-block-builder = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-blockchain = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-consensus = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-consensus-aura = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-consensus-babe = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-consensus-beefy = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-consensus-grandpa = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-consensus-slots = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-core = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-crypto-hashing = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-crypto-hashing-proc-macro = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-database = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-debug-derive = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-externalities = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-genesis-builder = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-inherents = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-io = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-keyring = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-keystore = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-maybe-compressed-blob = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-metadata-ir = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-mixnet = { git = 
"https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-mmr-primitives = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-npos-elections = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-offchain = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-panic-handler = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-rpc = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-runtime = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-runtime-interface = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-runtime-interface-proc-macro = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-session = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-staking = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-state-machine = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-statement-store = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-std = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-storage = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-timestamp = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-tracing = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-transaction-pool = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-transaction-storage-proof = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-trie = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-version = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-version-proc-macro = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-wasm-interface = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +sp-weights = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +staging-parachain-info = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +staging-xcm = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +staging-xcm-builder = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +staging-xcm-executor = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +substrate-bip39 = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +substrate-build-script-utils = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } +substrate-frame-rpc-system = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" } 
+substrate-prometheus-endpoint = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" }
+substrate-state-trie-migration-rpc = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" }
+substrate-test-client = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" }
+substrate-test-runtime = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" }
+substrate-test-runtime-client = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" }
+substrate-wasm-builder = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" }
+tracing-gum = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" }
+tracing-gum-proc-macro = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" }
+westend-runtime = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" }
+westend-runtime-constants = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" }
+xcm-emulator = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" }
+xcm-procedural = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.11.0" }
+
 [profile.production]
 codegen-units = 1
 inherits = "release"
diff --git a/ava-protocol-primitives/Cargo.toml b/ava-protocol-primitives/Cargo.toml
new file mode 100644
index 000000000..212aaeb9a
--- /dev/null
+++ b/ava-protocol-primitives/Cargo.toml
@@ -0,0 +1,50 @@
+[package]
+name = "ava-protocol-primitives"
+authors = [ "Ava Protocol Team" ]
+description = "Ava Protocol Runtime primitives"
+edition = "2021"
+homepage = "https://avaprotocol.org"
+license = "GPL-3.0"
+repository = "https://github.com/AvaProtocol/tanssi-integration"
+version = "1.0.0"
+
+[dependencies]
+impl-trait-for-tuples = { workspace = true }
+parity-scale-codec = { workspace = true, features = [ "derive" ] }
+scale-info = { workspace = true, features = [ "derive" ] }
+
+# Substrate Dependencies
+## Substrate Primitive Dependencies
+sp-consensus-aura = { workspace = true }
+sp-core = { workspace = true }
+sp-io = { workspace = true }
+sp-runtime = { workspace = true }
+sp-std = { workspace = true }
+
+## Substrate FRAME Dependencies
+frame-support = { workspace = true }
+
+## Polkadot deps
+staging-xcm = { workspace = true }
+staging-xcm-builder = { workspace = true }
+staging-xcm-executor = { workspace = true }
+
+## ORML deps
+orml-traits = { workspace = true }
+
+[features]
+default = [ "std" ]
+std = [
+    "frame-support/std",
+    "orml-traits/std",
+    "parity-scale-codec/std",
+    "scale-info/std",
+    "sp-consensus-aura/std",
+    "sp-core/std",
+    "sp-io/std",
+    "sp-runtime/std",
+    "sp-std/std",
+    "staging-xcm-builder/std",
+    "staging-xcm-executor/std",
+    "staging-xcm/std",
+]
diff --git a/ava-protocol-primitives/src/assets.rs b/ava-protocol-primitives/src/assets.rs
new file mode 100644
index 000000000..5db92caf7
--- /dev/null
+++ b/ava-protocol-primitives/src/assets.rs
@@ -0,0 +1,60 @@
+// This file is part of Ava Protocol.
+
+// Copyright (C) 2022 Ava Protocol
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and limitations under the License.
+
+use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
+use scale_info::TypeInfo;
+
+#[derive(
+    Clone,
+    Copy,
+    Default,
+    PartialOrd,
+    Ord,
+    PartialEq,
+    Eq,
+    Debug,
+    Encode,
+    Decode,
+    TypeInfo,
+    MaxEncodedLen,
+)]
+pub struct ConversionRate {
+    pub native: u32,
+    pub foreign: u32,
+}
+
+/// TODO: add decimal conversion
+/// A type describing our custom additional metadata stored in the orml-asset-registry.
+#[derive(
+    Clone,
+    Copy,
+    Default,
+    PartialOrd,
+    Ord,
+    PartialEq,
+    Eq,
+    Debug,
+    Encode,
+    Decode,
+    TypeInfo,
+    MaxEncodedLen,
+)]
+pub struct CustomMetadata {
+    /// The fee charged for every second that an XCM message takes to execute.
+    pub fee_per_second: Option<u128>,
+    /// The token conversion rate of Native to Foreign, ie. 1::10
+    pub conversion_rate: Option<ConversionRate>,
+}
diff --git a/ava-protocol-primitives/src/lib.rs b/ava-protocol-primitives/src/lib.rs
new file mode 100644
index 000000000..e54d0cab6
--- /dev/null
+++ b/ava-protocol-primitives/src/lib.rs
@@ -0,0 +1,92 @@
+// This file is part of Ava Protocol.
+
+// Copyright (C) 2022 Ava Protocol
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#![cfg_attr(not(feature = "std"), no_std)]
+
+use sp_core::H256;
+use sp_runtime::{
+    generic,
+    traits::{BlakeTwo256, IdentifyAccount, Verify},
+    MultiAddress, MultiSignature,
+};
+use sp_std::marker::PhantomData;
+
+use frame_support::traits::Get;
+
+use orml_traits::location::{RelativeReserveProvider, Reserve};
+use staging_xcm::latest::prelude::*;
+
+pub mod assets;
+
+pub use sp_consensus_aura::sr25519::AuthorityId as AuraId;
+
+/// Alias to 512-bit hash when used in the context of a transaction signature on the chain.
+pub type Signature = MultiSignature;
+
+/// Some way of identifying an account on the chain. We intentionally make it equivalent
+/// to the public key of our transaction signing scheme.
+pub type AccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId;
+
+/// Balance of an account.
+pub type Balance = u128;
+
+/// The signed version of `Balance`
+pub type Amount = i128;
+
+/// Index of a transaction in the chain.
+pub type Index = u32;
+
+/// A hash of some data used by the chain.
+pub type Hash = H256;
+
+/// An index to a block.
+pub type BlockNumber = u32;
+
+/// Identifier of a token or asset
+pub type TokenId = u32;
+
+/// The address format for describing accounts.
+pub type Address = MultiAddress<AccountId, ()>;
+
+/// Block header type as expected by this runtime.
+pub type Header = generic::Header<BlockNumber, BlakeTwo256>;
+
+/// Opaque, encoded, unchecked extrinsic.
+pub use sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic;
+
+/// Block type.
diff --git a/ava-protocol-primitives/src/lib.rs b/ava-protocol-primitives/src/lib.rs
new file mode 100644
index 000000000..e54d0cab6
--- /dev/null
+++ b/ava-protocol-primitives/src/lib.rs
@@ -0,0 +1,92 @@
+// This file is part of Ava Protocol.
+
+// Copyright (C) 2022 Ava Protocol
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+use sp_core::H256;
+use sp_runtime::{
+    generic,
+    traits::{BlakeTwo256, IdentifyAccount, Verify},
+    MultiAddress, MultiSignature,
+};
+use sp_std::marker::PhantomData;
+
+use frame_support::traits::Get;
+
+use orml_traits::location::{RelativeReserveProvider, Reserve};
+use staging_xcm::latest::prelude::*;
+
+pub mod assets;
+
+pub use sp_consensus_aura::sr25519::AuthorityId as AuraId;
+
+/// Alias to 512-bit hash when used in the context of a transaction signature on the chain.
+pub type Signature = MultiSignature;
+
+/// Some way of identifying an account on the chain. We intentionally make it equivalent
+/// to the public key of our transaction signing scheme.
+pub type AccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId;
+
+/// Balance of an account.
+pub type Balance = u128;
+
+/// The signed version of `Balance`
+pub type Amount = i128;
+
+/// Index of a transaction in the chain.
+pub type Index = u32;
+
+/// A hash of some data used by the chain.
+pub type Hash = H256;
+
+/// An index to a block.
+pub type BlockNumber = u32;
+
+/// Identifier of a token or asset
+pub type TokenId = u32;
+
+/// The address format for describing accounts.
+pub type Address = MultiAddress<AccountId, ()>;
+
+/// Block header type as expected by this runtime.
+pub type Header = generic::Header<BlockNumber, BlakeTwo256>;
+
+/// Opaque, encoded, unchecked extrinsic.
+pub use sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic;
+
+/// Block type.
+pub type Block = generic::Block<Header, UncheckedExtrinsic>;
+
+pub trait EnsureProxy {
+    fn ensure_ok(delegator: AccountId, delegatee: AccountId) -> Result<(), &'static str>;
+}
+
+/// `Asset` reserve location provider. It's based on `RelativeReserveProvider` and in
+/// addition will convert the self absolute location to a relative location.
+pub struct AbsoluteAndRelativeReserveProvider<AbsoluteLocation>(PhantomData<AbsoluteLocation>);
+impl<AbsoluteLocation: Get<Location>> Reserve
+    for AbsoluteAndRelativeReserveProvider<AbsoluteLocation>
+{
+    fn reserve(asset: &Asset) -> Option<Location> {
+        RelativeReserveProvider::reserve(asset).map(|reserve_location| {
+            if reserve_location == AbsoluteLocation::get() {
+                Location::here()
+            } else {
+                reserve_location
+            }
+        })
+    }
+}
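The reserve-provider addition above is easiest to see with a concrete value. A minimal sketch, assuming this chain is parachain 2000 (the para id and the `SelfAbsolute` parameter name are hypothetical):

```rust
use ava_protocol_primitives::AbsoluteAndRelativeReserveProvider;
use orml_traits::location::Reserve;
use staging_xcm::latest::prelude::*;

frame_support::parameter_types! {
    // Hypothetical absolute (relay-anchored) location of this parachain.
    pub SelfAbsolute: Location = Location::new(1, Parachain(2000));
}

fn main() {
    // An asset whose id points at this chain via the absolute path...
    let asset: Asset = (Location::new(1, Parachain(2000)), 1_000u128).into();
    // ...is reported as reserved `Here` (the relative self location), so it is
    // treated identically to an asset addressed relatively.
    let reserve = AbsoluteAndRelativeReserveProvider::<SelfAbsolute>::reserve(&asset);
    assert_eq!(reserve, Some(Location::here()));
    // Any other reserve location passes through unchanged.
}
```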
diff --git a/container-chains/nodes/simple/Cargo.toml b/container-chains/nodes/simple/Cargo.toml
index d2d48df68..a81f7e1f9 100644
--- a/container-chains/nodes/simple/Cargo.toml
+++ b/container-chains/nodes/simple/Cargo.toml
@@ -82,6 +82,9 @@ sp-transaction-pool = { workspace = true }
 substrate-frame-rpc-system = { workspace = true }
 substrate-prometheus-endpoint = { workspace = true }
 
+# Polkadot (wasm)
+staging-xcm = { workspace = true }
+
 # Polkadot
 polkadot-cli = { workspace = true }
 polkadot-parachain-primitives = { workspace = true }
@@ -99,6 +102,13 @@ cumulus-primitives-core = { workspace = true }
 cumulus-primitives-parachain-inherent = { workspace = true }
 cumulus-relay-chain-interface = { workspace = true }
 
+# ORML
+orml-asset-registry = { workspace = true }
+
+# Ava Protocol
+ava-protocol-primitives = { workspace = true }
+common-runtime = { workspace = true }
+
 [build-dependencies]
 substrate-build-script-utils = { workspace = true }
 
@@ -110,6 +120,7 @@ runtime-benchmarks = [
    "frame-benchmarking-cli/runtime-benchmarks",
    "frame-benchmarking/runtime-benchmarks",
    "nimbus-primitives/runtime-benchmarks",
+    "orml-asset-registry/runtime-benchmarks",
    "polkadot-cli/runtime-benchmarks",
    "polkadot-parachain-primitives/runtime-benchmarks",
    "polkadot-primitives/runtime-benchmarks",
@@ -121,6 +132,7 @@ runtime-benchmarks = [
 try-runtime = [
    "container-chain-template-simple-runtime/try-runtime",
    "nimbus-primitives/try-runtime",
+    "orml-asset-registry/try-runtime",
    "polkadot-cli/try-runtime",
    "polkadot-service/try-runtime",
    "sp-runtime/try-runtime",
diff --git a/container-chains/nodes/simple/src/chain_spec.rs b/container-chains/nodes/simple/src/chain_spec.rs
index 1faf5bdce..154b37e6e 100644
--- a/container-chains/nodes/simple/src/chain_spec.rs
+++ b/container-chains/nodes/simple/src/chain_spec.rs
@@ -15,16 +15,27 @@
 // along with Tanssi.  If not, see <http://www.gnu.org/licenses/>.
 
 use {
+    ava_protocol_primitives::{assets::CustomMetadata, Balance, TokenId},
+    common_runtime::{
+        config::orml_asset_registry::{AssetMetadataOf, StringLimit},
+        constants::currency::TOKEN_DECIMALS,
+    },
     container_chain_template_simple_runtime::{
-        AccountId, MaintenanceModeConfig, MigrationsConfig, PolkadotXcmConfig, Signature,
+        AccountId, AssetRegistryConfig, MaintenanceModeConfig, MigrationsConfig, PolkadotXcmConfig,
+        Signature,
     },
     cumulus_primitives_core::ParaId,
+    parity_scale_codec::Encode,
     sc_chain_spec::{ChainSpecExtension, ChainSpecGroup},
     sc_network::config::MultiaddrWithPeerId,
     sc_service::ChainType,
     serde::{Deserialize, Serialize},
     sp_core::{sr25519, Pair, Public},
-    sp_runtime::traits::{IdentifyAccount, Verify},
+    sp_runtime::{
+        traits::{IdentifyAccount, Verify},
+        BoundedVec,
+    },
+    staging_xcm::{prelude::*, VersionedLocation},
 };
 
 /// Specialized `ChainSpec` for the normal parachain runtime.
@@ -43,6 +54,8 @@ pub fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic::Pu
 /// Orcherstrator's parachain id
 pub const ORCHESTRATOR: ParaId = ParaId::new(1000);
 
+const TOKEN_SYMBOL: &str = "TUR";
+
 /// The extensions for the [`ChainSpec`].
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ChainSpecGroup, ChainSpecExtension)]
 #[serde(deny_unknown_fields)]
@@ -73,7 +86,7 @@ where
 pub fn development_config(para_id: ParaId, boot_nodes: Vec<MultiaddrWithPeerId>) -> ChainSpec {
     // Give your base currency a unit name and decimal places
     let mut properties = sc_chain_spec::Properties::new();
-    properties.insert("tokenSymbol".into(), "UNIT".into());
+    properties.insert("tokenSymbol".into(), TOKEN_SYMBOL.into());
     properties.insert("tokenDecimals".into(), 12.into());
     properties.insert("ss58Format".into(), 42.into());
     properties.insert("isEthereum".into(), false.into());
@@ -104,6 +117,7 @@ pub fn development_config(para_id: ParaId, boot_nodes: Vec<MultiaddrWithPeerId>)
             default_funded_accounts.clone(),
             para_id,
             get_account_id_from_seed::<sr25519::Public>("Alice"),
+            vec![],
         ))
         .with_properties(properties)
         .with_boot_nodes(boot_nodes)
@@ -145,6 +159,7 @@ pub fn local_testnet_config(para_id: ParaId, boot_nodes: Vec<MultiaddrWithPeerId>) -> ChainSp
             default_funded_accounts.clone(),
             para_id,
             get_account_id_from_seed::<sr25519::Public>("Alice"),
+            vec![],
         ))
         .with_properties(properties)
         .with_protocol_id(&protocol_id)
@@ -156,7 +171,38 @@ fn testnet_genesis(
     endowed_accounts: Vec<AccountId>,
     id: ParaId,
     root_key: AccountId,
+    additional_assets: Vec<(TokenId, Vec<u8>)>,
 ) -> serde_json::Value {
+    let assets = [
+        vec![(
+            0,
+            orml_asset_registry::AssetMetadata::<Balance, CustomMetadata, StringLimit>::encode(
+                &AssetMetadataOf {
+                    decimals: TOKEN_DECIMALS,
+                    name: BoundedVec::truncate_from(b"Native".to_vec()),
+                    symbol: BoundedVec::truncate_from(TOKEN_SYMBOL.as_bytes().to_vec()),
+                    existential_deposit: 100_000_000,
+                    location: Some(VersionedLocation::V4(Location {
+                        parents: 0,
+                        interior: Here,
+                    })),
+                    additional: CustomMetadata {
+                        fee_per_second: Some(416_000_000_000),
+                        conversion_rate: None,
+                    },
+                },
+            ),
+        )],
+        additional_assets,
+    ]
+    .concat();
+
+    let last_asset_id = assets
+        .iter()
+        .map(|asset| asset.0)
+        .max()
+        .expect("At least 1 item!");
+
     let g = container_chain_template_simple_runtime::RuntimeGenesisConfig {
         balances: container_chain_template_simple_runtime::BalancesConfig {
             balances: endowed_accounts
@@ -187,6 +233,11 @@ fn testnet_genesis(
         transaction_payment: Default::default(),
         tx_pause: Default::default(),
         system: Default::default(),
+        asset_registry: AssetRegistryConfig {
+            assets,
+            last_asset_id,
+        },
+        tokens: Default::default(),
     };
 
     serde_json::to_value(g).unwrap()
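For downstream chain specs, `additional_assets` takes pre-encoded metadata keyed by token id. A sketch of one entry, mirroring the genesis native asset above (the id `1` and the relay-asset values are hypothetical). Separately, note the chain-spec properties still advertise 12 `tokenDecimals` while `TOKEN_DECIMALS` is 10, which may be worth reconciling:

```rust
// Hypothetical second asset (id 1) for `testnet_genesis(..., additional_assets)`.
let relay_asset: (TokenId, Vec<u8>) = (
    1,
    AssetMetadataOf {
        decimals: 10,
        name: BoundedVec::truncate_from(b"Relay".to_vec()),
        symbol: BoundedVec::truncate_from(b"DOT".to_vec()),
        existential_deposit: 100_000_000,
        location: Some(VersionedLocation::V4(Location::parent())),
        additional: CustomMetadata {
            fee_per_second: Some(416_000_000_000),
            conversion_rate: None,
        },
    }
    .encode(),
);
```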
diff --git a/container-chains/runtime-templates/common/Cargo.toml b/container-chains/runtime-templates/common/Cargo.toml
new file mode 100644
index 000000000..a4f6d1af7
--- /dev/null
+++ b/container-chains/runtime-templates/common/Cargo.toml
@@ -0,0 +1,50 @@
+[package]
+name = "common-runtime"
+authors = [ "Ava Protocol Team" ]
+description = "The commonalities between runtimes"
+edition = "2021"
+homepage = "https://avaprotocol.org"
+license = "GPL-3.0"
+repository = "https://github.com/AvaProtocol/tanssi-integration"
+version = "0.1.0"
+
+[dependencies]
+# Substrate Dependencies
+## Substrate Primitive Dependencies
+sp-runtime = { workspace = true }
+sp-std = { workspace = true }
+
+## Substrate FRAME Dependencies
+frame-support = { workspace = true }
+frame-system = { workspace = true }
+
+## Substrate Pallet Dependencies
+pallet-balances = { workspace = true }
+pallet-transaction-payment = { workspace = true }
+pallet-treasury = { workspace = true }
+
+## Polkadot deps
+polkadot-primitives = { workspace = true }
+
+## ORML
+orml-tokens = { workspace = true }
+orml-traits = { workspace = true }
+
+# Local Dependencies
+ava-protocol-primitives = { workspace = true }
+
+[features]
+default = [ "std" ]
+std = [
    "ava-protocol-primitives/std",
    "frame-support/std",
    "frame-system/std",
    "orml-tokens/std",
    "orml-traits/std",
    "pallet-balances/std",
    "pallet-transaction-payment/std",
    "pallet-treasury/std",
    "polkadot-primitives/std",
    "sp-runtime/std",
    "sp-std/std",
+]
diff --git a/container-chains/runtime-templates/common/src/constants.rs b/container-chains/runtime-templates/common/src/constants.rs
new file mode 100644
index 000000000..a1f003792
--- /dev/null
+++ b/container-chains/runtime-templates/common/src/constants.rs
@@ -0,0 +1,117 @@
+// This file is part of Ava Protocol.
+
+// Copyright (C) 2022 Ava Protocol
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+pub mod time {
+    use ava_protocol_primitives::BlockNumber;
+
+    /// This determines the average expected block time that we are targeting.
+    /// Blocks will be produced at a minimum duration defined by `SLOT_DURATION`.
+    /// `SLOT_DURATION` is picked up by `pallet_timestamp` which is in turn picked
+    /// up by `pallet_aura` to implement `fn slot_duration()`.
+    ///
+    /// Change this to adjust the block time.
+    pub const MILLISECS_PER_BLOCK: u64 = 12000;
+
+    // NOTE: Currently it is not possible to change the slot duration after the chain has started.
+    // Attempting to do so will brick block production.
+    pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK;
+
+    // Time is measured by number of blocks.
+    pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber);
+    pub const HOURS: BlockNumber = MINUTES * 60;
+    pub const DAYS: BlockNumber = HOURS * 24;
+}
+
+pub mod currency {
+    use ava_protocol_primitives::Balance;
+
+    pub const TOKEN_DECIMALS: u32 = 10;
+    const TOKEN_BASE: u128 = 10;
+    // Unit = the base number of indivisible units for balances
+    pub const UNIT: Balance = TOKEN_BASE.pow(TOKEN_DECIMALS); // 10_000_000_000
+    pub const DOLLAR: Balance = UNIT; // 10_000_000_000
+    pub const CENT: Balance = DOLLAR / 100; // 100_000_000
+    pub const MILLICENT: Balance = CENT / 1_000; // 100_000
+
+    /// The existential deposit. Set to 1/100 of the Connected Relay Chain.
+    pub const EXISTENTIAL_DEPOSIT: Balance = CENT;
+
+    pub const fn deposit(items: u32, bytes: u32) -> Balance {
+        items as Balance * 2_000 * CENT + (bytes as Balance) * 100 * MILLICENT
+    }
+}
+
+pub mod fees {
+    use frame_support::parameter_types;
+    use pallet_transaction_payment::{Multiplier, TargetedFeeAdjustment};
+    use sp_runtime::{traits::Bounded, FixedPointNumber, Perquintill};
+
+    parameter_types! {
+        /// The portion of the `NORMAL_DISPATCH_RATIO` that we adjust the fees with. Blocks filled less
+        /// than this will decrease the weight and more will increase.
+        pub const TargetBlockFullness: Perquintill = Perquintill::from_percent(1);
+        /// The adjustment variable of the runtime. Higher values will cause `TargetBlockFullness` to
+        /// change the fees more rapidly.
+        pub AdjustmentVariable: Multiplier = Multiplier::saturating_from_rational(3, 100_000);
+        /// Minimum amount of the multiplier. This value cannot be too low. A test case should ensure
+        /// that combined with `AdjustmentVariable`, we can recover from the minimum.
+        /// See `multiplier_can_grow_from_zero`.
+        pub MinimumMultiplier: Multiplier = Multiplier::saturating_from_rational(1, 1_000_000u128);
+        pub MaximumMultiplier: Multiplier = Bounded::max_value();
+    }
+
+    /// Parameterized slow adjusting fee updated based on
+    /// https://w3f-research.readthedocs.io/en/latest/polkadot/overview/2-token-economics.html#-2.-slow-adjusting-mechanism // editorconfig-checker-disable-line
+    ///
+    /// The adjustment algorithm boils down to:
+    ///
+    /// diff = (previous_block_weight - target) / maximum_block_weight
+    /// next_multiplier = prev_multiplier * (1 + (v * diff) + ((v * diff)^2 / 2))
+    /// assert(next_multiplier > min)
+    ///     where: v is AdjustmentVariable
+    ///            target is TargetBlockFullness
+    ///            min is MinimumMultiplier
+    pub type SlowAdjustingFeeUpdate<R> = TargetedFeeAdjustment<
+        R,
+        TargetBlockFullness,
+        AdjustmentVariable,
+        MinimumMultiplier,
+        MaximumMultiplier,
+    >;
+}
+
+pub mod weight_ratios {
+    use frame_support::weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight};
+    use sp_runtime::Perbill;
+
+    /// We use at most 5% of the block weight running scheduled tasks during `on_initialize`.
+    pub const SCHEDULED_TASKS_INITIALIZE_RATIO: Perbill = Perbill::from_percent(5);
+
+    /// We assume that ~5% of the block weight is consumed by `on_initialize` handlers. This is
+    /// used to limit the maximal weight of a single extrinsic.
+    pub const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(5);
+
+    /// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used by
+    /// `Operational` extrinsics.
+    pub const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75);
+
+    /// We allow for 0.5 seconds of compute with a 12 second average block time.
+    pub const MAXIMUM_BLOCK_WEIGHT: Weight = Weight::from_parts(
+        WEIGHT_REF_TIME_PER_SECOND.saturating_div(2),
+        polkadot_primitives::MAX_POV_SIZE as u64,
+    );
+}
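The `SlowAdjustingFeeUpdate` doc comment above compresses the update rule; a plain-arithmetic illustration (reviewer note, with a hypothetical block fullness, approximating `diff` as fullness minus target):

```rust
fn main() {
    // next = prev * (1 + v*diff + (v*diff)^2 / 2), clamped by TargetedFeeAdjustment
    // to [MinimumMultiplier, MaximumMultiplier].
    let v = 3.0 / 100_000.0; // AdjustmentVariable
    let target = 0.01; // TargetBlockFullness (1%)
    let fullness = 0.26; // hypothetical previous-block fullness
    let v_diff = v * (fullness - target);
    let next = 1.0 * (1.0 + v_diff + v_diff * v_diff / 2.0);
    // A block 25 points above target nudges fees up by only ~0.00075%,
    // hence "slow adjusting".
    assert!((next - 1.0000075).abs() < 1e-9);
}
```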
diff --git a/container-chains/runtime-templates/common/src/fees.rs b/container-chains/runtime-templates/common/src/fees.rs
new file mode 100644
index 000000000..d67321cc4
--- /dev/null
+++ b/container-chains/runtime-templates/common/src/fees.rs
@@ -0,0 +1,55 @@
+// This file is part of Ava Protocol.
+
+// Copyright (C) 2022 Ava Protocol
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use frame_support::traits::{Imbalance, OnUnbalanced};
+use pallet_balances::NegativeImbalance;
+
+pub struct DealWithInclusionFees<R>(sp_std::marker::PhantomData<R>);
+impl<R> OnUnbalanced<NegativeImbalance<R>> for DealWithInclusionFees<R>
+where
+    R: pallet_balances::Config + pallet_treasury::Config,
+    pallet_treasury::Pallet<R>: OnUnbalanced<NegativeImbalance<R>>,
+{
+    fn on_unbalanceds<B>(mut fees_then_tips: impl Iterator<Item = NegativeImbalance<R>>) {
+        if let Some(mut fees) = fees_then_tips.next() {
+            if let Some(tips) = fees_then_tips.next() {
+                tips.merge_into(&mut fees);
+            }
+            // 20% burned, 80% to the treasury
+            let (_, to_treasury) = fees.ration(20, 80);
+            // Balances pallet automatically burns dropped Negative Imbalances by decreasing
+            // total_supply accordingly
+            <pallet_treasury::Pallet<R> as OnUnbalanced<_>>::on_unbalanced(to_treasury);
+        }
+    }
+}
+
+pub struct DealWithExecutionFees<R>(sp_std::marker::PhantomData<R>);
+impl<R> OnUnbalanced<NegativeImbalance<R>> for DealWithExecutionFees<R>
+where
+    R: pallet_balances::Config + pallet_treasury::Config,
+    pallet_treasury::Pallet<R>: OnUnbalanced<NegativeImbalance<R>>,
+{
+    fn on_unbalanceds<B>(mut fees: impl Iterator<Item = NegativeImbalance<R>>) {
+        if let Some(fees) = fees.next() {
+            // 20% burned, 80% to the treasury
+            let (_, to_treasury) = fees.ration(20, 80);
+            // Balances pallet automatically burns dropped Negative Imbalances by decreasing
+            // total_supply accordingly
+            <pallet_treasury::Pallet<R> as OnUnbalanced<_>>::on_unbalanced(to_treasury);
+        }
+    }
+}
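A minimal sketch of where `DealWithInclusionFees` would plug in, assuming the stock `pallet_transaction_payment::CurrencyAdapter`; this PR may wire it differently, and the surrounding associated types are elided:

```rust
// Hypothetical runtime wiring for the 20% burn / 80% treasury split above.
impl pallet_transaction_payment::Config for Runtime {
    // Inclusion fees (and tips) are settled in the native currency, then routed
    // through DealWithInclusionFees, which rations them 20/80 and drops the
    // burned share so total issuance shrinks accordingly.
    type OnChargeTransaction =
        pallet_transaction_payment::CurrencyAdapter<Balances, DealWithInclusionFees<Runtime>>;
    // ...remaining associated types as in the template runtime...
}
```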
diff --git a/container-chains/runtime-templates/common/src/lib.rs b/container-chains/runtime-templates/common/src/lib.rs
new file mode 100644
index 000000000..7ccc2f1b0
--- /dev/null
+++ b/container-chains/runtime-templates/common/src/lib.rs
@@ -0,0 +1,109 @@
+// This file is part of Ava Protocol.
+
+// Copyright (C) 2022 Ava Protocol
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+use frame_support::{parameter_types, traits::Get};
+use orml_traits::currency::MutationHooks;
+use sp_std::marker::PhantomData;
+
+pub mod constants;
+pub mod fees;
+
+pub struct CurrencyHooks<T, DustAccount>(PhantomData<T>, DustAccount);
+impl<T, DustAccount> MutationHooks<T::AccountId, T::CurrencyId, T::Balance>
+    for CurrencyHooks<T, DustAccount>
+where
+    T: orml_tokens::Config,
+    DustAccount: Get<<T as frame_system::Config>::AccountId>,
+{
+    type OnDust = orml_tokens::TransferDust<T, DustAccount>;
+    type OnSlash = ();
+    type PreDeposit = ();
+    type PostDeposit = ();
+    type PreTransfer = ();
+    type PostTransfer = ();
+    type OnNewTokenAccount = ();
+    type OnKilledTokenAccount = ();
+}
+
+pub mod config {
+
+    pub mod orml_asset_registry {
+        use crate::*;
+        use ava_protocol_primitives::{assets::CustomMetadata, Balance};
+        use orml_traits::asset_registry::AssetMetadata;
+
+        parameter_types! {
+            pub const StringLimit: u32 = 50;
+        }
+
+        pub type AssetMetadataOf = AssetMetadata<Balance, CustomMetadata, StringLimit>;
+        // type CurrencyAdapter<T> = orml_tokens::MultiTokenCurrencyAdapter<T>;
+
+        // pub struct SequentialIdWithCreation<T>(PhantomData<T>);
+        // impl<T> AssetProcessor<TokenId, AssetMetadataOf> for SequentialIdWithCreation<T>
+        // where
+        //     T: orml_asset_registry::Config,
+        //     T: orml_tokens::Config,
+        //     T: pallet_treasury::Config,
+        //     TokenId: From<<T as orml_tokens::Config>::CurrencyId>,
+        // {
+        //     fn pre_register(
+        //         id: Option<TokenId>,
+        //         asset_metadata: AssetMetadataOf,
+        //     ) -> Result<(TokenId, AssetMetadataOf), DispatchError> {
+        //         let next_id = CurrencyAdapter::<T>::get_next_currency_id();
+        //         let asset_id = id.unwrap_or(next_id.into());
+        //         let treasury_account =
+        //             config::TreasuryPalletIdOf::<T>::get().into_account_truncating();
+
+        //         match asset_id.cmp(&next_id.into()) {
+        //             Ordering::Equal =>
+        //                 CurrencyAdapter::<T>::create(&treasury_account, Default::default())
+        //                     .and_then(|created_asset_id| {
+        //                         match created_asset_id.cmp(&asset_id.into()) {
+        //                             Ordering::Equal => Ok((asset_id, asset_metadata)),
+        //                             _ =>
+        //                                 Err(orml_asset_registry::Error::<T>::InvalidAssetId.into()),
+        //                         }
+        //                     }),
+        //             Ordering::Less => Ok((asset_id, asset_metadata)),
+        //             _ => Err(orml_asset_registry::Error::<T>::InvalidAssetId.into()),
+        //         }
+        //     }
+        // }
+
+        // pub struct AssetAuthority<T>(PhantomData<T>);
+        // impl<T> EnsureOriginWithArg<T::RuntimeOrigin, Option<TokenId>> for AssetAuthority<T>
+        // where
+        //     T: frame_system::Config,
+        // {
+        //     type Success = ();
+
+        //     fn try_origin(
+        //         origin: T::RuntimeOrigin,
+        //         _asset_id: &Option<TokenId>,
+        //     ) -> Result<Self::Success, T::RuntimeOrigin> {
+        //         <EnsureRoot<T::AccountId> as EnsureOrigin<T::RuntimeOrigin>>::try_origin(origin)
+        //     }
+
+        //     #[cfg(feature = "runtime-benchmarks")]
+        //     fn try_successful_origin(_: &Option<TokenId>) -> Result<T::RuntimeOrigin, ()> {
+        //         Ok(T::RuntimeOrigin::root())
+        //     }
+        // }
+    }
+}
diff --git a/container-chains/runtime-templates/simple/Cargo.toml b/container-chains/runtime-templates/simple/Cargo.toml
index 67950279e..ce6cba996 100644
--- a/container-chains/runtime-templates/simple/Cargo.toml
+++ b/container-chains/runtime-templates/simple/Cargo.toml
@@ -13,7 +13,7 @@ targets = [ "x86_64-unknown-linux-gnu" ]
 workspace = true
 
 [dependencies]
-hex-literal = { workspace = true, optional = true }
+hex-literal = { workspace = true }
 log = { workspace = true }
 parity-scale-codec = { workspace = true, features = [ "derive" ] }
 scale-info = { workspace = true, features = [ "derive" ] }
@@ -21,10 +21,15 @@ serde = { workspace = true, features = [ "derive" ] }
 smallvec = { workspace = true }
 
 # Local
+ava-protocol-primitives = { workspace = true }
+common-runtime = { workspace = true }
 dp-consensus = { workspace = true }
 dp-impl-tanssi-pallets-config = { workspace = true }
 dp-slot-duration-runtime-api = { workspace = true }
+pallet-automation-price = { workspace = true }
+pallet-automation-time = { workspace = true }
 pallet-cc-authorities-noting = { workspace = true }
+pallet-xcmp-handler = { workspace = true }
 tanssi-runtime-common = { workspace = true }
 
 # Moonkit
@@ -48,6 +53,8 @@ frame-system-rpc-runtime-api = { workspace = true }
 pallet-asset-rate = { workspace = true }
 pallet-assets = { workspace = true }
 pallet-balances = { workspace = true }
+pallet-collective = { workspace = true }
+pallet-membership = { workspace = true }
 pallet-message-queue = { workspace = true }
 pallet-multisig = { workspace = true }
 pallet-proxy = { workspace = true }
@@ -99,6 +106,14 @@ cumulus-primitives-utility = { workspace = true }
 parachain-info = { workspace = true }
 parachains-common = { workspace = true }
 
+
+# ORML
+orml-asset-registry = { workspace = true }
+orml-currencies = { workspace = true }
+orml-tokens = { workspace = true }
+orml-traits = { workspace = true }
+orml-xtokens = { workspace = true }
+
 # Benchmarking
 frame-benchmarking = { workspace = true, optional = true }
 frame-system-benchmarking = { workspace = true, optional = true }
@@ -110,6 +125,8 @@ substrate-wasm-builder = { workspace = true }
 default = [ "std" ]
 std = [
    "async-backing-primitives/std",
+    "ava-protocol-primitives/std",
+    "common-runtime/std",
    "cumulus-pallet-dmp-queue/std",
    "cumulus-pallet-parachain-system/std",
    "cumulus-pallet-session-benchmarking/std",
@@ -131,14 +148,23 @@ std = [
    "frame-try-runtime/std",
    "log/std",
    "nimbus-primitives/std",
+    "orml-asset-registry/std",
+    "orml-currencies/std",
+    "orml-tokens/std",
+    "orml-traits/std",
+    "orml-xtokens/std",
    "pallet-asset-rate/std",
    "pallet-assets/std",
    "pallet-async-backing/std",
    "pallet-author-inherent/std",
+    "pallet-automation-price/std",
+    "pallet-automation-time/std",
    "pallet-balances/std",
    "pallet-cc-authorities-noting/std",
+    "pallet-collective/std",
    "pallet-foreign-asset-creator/std",
    "pallet-maintenance-mode/std",
+    "pallet-membership/std",
    "pallet-message-queue/std",
@@ -154,6 +181,7 @@ std = [
    "pallet-xcm-benchmarks?/std",
    "pallet-xcm-executor-utils/std",
    "pallet-xcm/std",
+    "pallet-xcmp-handler/std",
    "parachain-info/std",
    "parachains-common/std",
    "parity-scale-codec/std",
@@ -199,14 +227,20 @@ runtime-benchmarks = [
    "frame-support/runtime-benchmarks",
    "frame-system-benchmarking/runtime-benchmarks",
    "frame-system/runtime-benchmarks",
-    "hex-literal",
    "nimbus-primitives/runtime-benchmarks",
+    "orml-asset-registry/runtime-benchmarks",
+    "orml-tokens/runtime-benchmarks",
+    "orml-xtokens/runtime-benchmarks",
    "pallet-asset-rate/runtime-benchmarks",
    "pallet-assets/runtime-benchmarks",
    "pallet-author-inherent/runtime-benchmarks",
+    "pallet-automation-price/runtime-benchmarks",
+    "pallet-automation-time/runtime-benchmarks",
    "pallet-balances/runtime-benchmarks",
    "pallet-cc-authorities-noting/runtime-benchmarks",
+    "pallet-collective/runtime-benchmarks",
    "pallet-foreign-asset-creator/runtime-benchmarks",
+    "pallet-membership/runtime-benchmarks",
    "pallet-message-queue/runtime-benchmarks",
    "pallet-migrations/runtime-benchmarks",
    "pallet-multisig/runtime-benchmarks",
@@ -218,6 +252,7 @@ runtime-benchmarks = [
    "pallet-xcm-benchmarks/runtime-benchmarks",
    "pallet-xcm-executor-utils/runtime-benchmarks",
    "pallet-xcm/runtime-benchmarks",
+    "pallet-xcmp-handler/runtime-benchmarks",
    "parachains-common/runtime-benchmarks",
    "polkadot-parachain-primitives/runtime-benchmarks",
    "polkadot-runtime-common/runtime-benchmarks",
"pallet-membership/try-runtime", "pallet-message-queue/try-runtime", "pallet-migrations/try-runtime", "pallet-multisig/try-runtime", @@ -261,6 +309,8 @@ try-runtime = [ "pallet-utility/try-runtime", "pallet-xcm-executor-utils/try-runtime", "pallet-xcm/try-runtime", + "pallet-xcmp-handler/try-runtime", + "pallet-xcmp-handler/try-runtime", "parachain-info/try-runtime", "polkadot-runtime-common/try-runtime", "sp-runtime/try-runtime", diff --git a/container-chains/runtime-templates/simple/src/lib.rs b/container-chains/runtime-templates/simple/src/lib.rs index ab3d03e29..d2b7e204f 100644 --- a/container-chains/runtime-templates/simple/src/lib.rs +++ b/container-chains/runtime-templates/simple/src/lib.rs @@ -34,17 +34,20 @@ pub mod weights; pub use sp_runtime::{MultiAddress, Perbill, Permill}; use { + ava_protocol_primitives::{AbsoluteAndRelativeReserveProvider, EnsureProxy}, + common_runtime::constants::weight_ratios::SCHEDULED_TASKS_INITIALIZE_RATIO, cumulus_primitives_core::AggregateMessageOrigin, dp_impl_tanssi_pallets_config::impl_tanssi_pallets_config, frame_support::{ construct_runtime, dispatch::DispatchClass, + ensure, genesis_builder_helper::{build_state, get_preset}, pallet_prelude::DispatchResult, parameter_types, traits::{ tokens::ConversionToAssetBalance, ConstBool, ConstU128, ConstU32, ConstU64, ConstU8, - Contains, InsideBoth, InstanceFilter, + Contains, EnsureOrigin, EnsureOriginWithArg, InsideBoth, InstanceFilter, }, weights::{ constants::{ @@ -54,6 +57,7 @@ use { ConstantMultiplier, Weight, WeightToFee as _, WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial, }, + PalletId, }, frame_system::{ limits::{BlockLength, BlockWeights}, @@ -71,7 +75,7 @@ use { sp_core::{MaxEncodedLen, OpaqueMetadata}, sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, IdentifyAccount, Verify}, + traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, IdentifyAccount, Verify, Zero}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, MultiSignature, }, @@ -86,7 +90,12 @@ use { }, }; +use ava_protocol_primitives::{assets::CustomMetadata, TokenId}; + pub mod xcm_config; +use xcm_config::{ + FeePerSecondProvider, SelfLocationAbsolute, ToTreasury, TokenIdConvert, UniversalLocation, +}; // Polkadot imports use polkadot_runtime_common::BlockHashCount; @@ -224,13 +233,15 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("container-chain-template"), impl_name: create_runtime_str!("container-chain-template"), authoring_version: 1, - spec_version: 800, + spec_version: 801, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, state_version: 1, }; +pub const NATIVE_TOKEN_ID: TokenId = 0; + /// This determines the average expected block time that we are targeting. /// Blocks will be produced at a minimum duration defined by `SLOT_DURATION`. /// `SLOT_DURATION` is picked up by `pallet_timestamp` which is in turn picked @@ -375,10 +386,12 @@ impl frame_system::Config for Runtime { parameter_types! { pub const ExistentialDeposit: Balance = EXISTENTIAL_DEPOSIT; + pub const MaxLocks: u32 = 50; + pub const MaxReserves: u32 = 50; } impl pallet_balances::Config for Runtime { - type MaxLocks = ConstU32<50>; + type MaxLocks = MaxLocks; /// The type for recording an account's balance. type Balance = Balance; /// The ubiquitous event type. 
@@ -386,7 +399,7 @@ impl pallet_balances::Config for Runtime {
     type DustRemoval = ();
     type ExistentialDeposit = ExistentialDeposit;
     type AccountStore = System;
-    type MaxReserves = ConstU32<50>;
+    type MaxReserves = MaxReserves;
     type ReserveIdentifier = [u8; 8];
     type FreezeIdentifier = RuntimeFreezeReason;
     type MaxFreezes = ConstU32<0>;
@@ -659,6 +672,196 @@ impl pallet_multisig::Config for Runtime {
     type WeightInfo = weights::pallet_multisig::SubstrateWeight<Runtime>;
 }
 
+parameter_types! {
+    // pub const ProposalBond: Permill = Permill::from_percent(5);
+    // pub const ProposalBondMinimum: Balance = 1 * DOLLAR;
+    // pub const ProposalBondMaximum: Balance = 5 * DOLLAR;
+    // pub const SpendPeriod: BlockNumber = 7 * DAYS;
+    // pub const Burn: Permill = Permill::from_percent(100);
+    // pub const TipCountdown: BlockNumber = 1 * DAYS;
+    // pub const TipFindersFee: Percent = Percent::from_percent(20);
+    // pub const TipReportDepositBase: Balance = 1 * UNIT;
+    // pub const DataDepositPerByte: Balance = 1 * CENT;
+    // pub const BountyDepositBase: Balance = 1 * UNIT;
+    // pub const BountyDepositPayoutDelay: BlockNumber = 1 * DAYS;
+    pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry");
+    // pub const BountyUpdatePeriod: BlockNumber = 14 * DAYS;
+    // pub const CuratorDepositMultiplier: Permill = Permill::from_percent(50);
+    // pub CuratorDepositMin: Balance = DOLLAR;
+    // pub CuratorDepositMax: Balance = 100 * DOLLAR;
+    // pub const BountyValueMinimum: Balance = 5 * UNIT;
+}
+
+pub struct AssetAuthority;
+impl EnsureOriginWithArg<RuntimeOrigin, Option<TokenId>> for AssetAuthority {
+    type Success = ();
+
+    fn try_origin(
+        origin: RuntimeOrigin,
+        _asset_id: &Option<TokenId>,
+    ) -> Result<Self::Success, RuntimeOrigin> {
+        <EnsureRoot<AccountId> as EnsureOrigin<RuntimeOrigin>>::try_origin(origin)
+    }
+
+    #[cfg(feature = "runtime-benchmarks")]
+    fn try_successful_origin(_asset_id: &Option<TokenId>) -> Result<RuntimeOrigin, ()> {
+        // EnsureRoot::try_successful_origin()
+        <EnsureRoot<AccountId> as EnsureOrigin<RuntimeOrigin>>::try_successful_origin()
+    }
+}
+
+impl orml_asset_registry::module::Config for Runtime {
+    type RuntimeEvent = RuntimeEvent;
+    type CustomMetadata = CustomMetadata;
+    type AssetId = TokenId;
+    type AuthorityOrigin = AssetAuthority;
+    type AssetProcessor = orml_asset_registry::SequentialId<Runtime>;
+    type Balance = Balance;
+    type WeightInfo = weights::asset_registry_weights::SubstrateWeight<Runtime>;
+    type StringLimit = common_runtime::config::orml_asset_registry::StringLimit;
+}
+
+// parameter_types! {
+//     pub const GetNativeCurrencyId: TokenId = NATIVE_TOKEN_ID;
+// }
+
+parameter_types! {
+    pub const MaxScheduleSeconds: u64 = 6 * 30 * 24 * 60 * 60; // 6 months in seconds
+    pub const SlotSizeSeconds: u64 = 600; // 10 minutes in seconds
+    pub const MaxBlockWeight: u64 = MAXIMUM_BLOCK_WEIGHT.ref_time();
+    pub const MaxWeightPercentage: Perbill = SCHEDULED_TASKS_INITIALIZE_RATIO;
+    pub const UpdateQueueRatio: Perbill = Perbill::from_percent(50);
+    pub const ExecutionWeightFee: Balance = 12;
+}
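With `AuthorityOrigin = AssetAuthority` (root-gated) and `AssetProcessor = SequentialId`, registering a foreign asset after genesis would look roughly like the sketch below. Reviewer illustration only: the values are hypothetical and `register_asset`'s exact signature should be checked against the orml-asset-registry version in use:

```rust
// Root registers a sibling-parachain asset; SequentialId assigns the next id.
let metadata = AssetMetadataOf {
    decimals: 12,
    name: BoundedVec::truncate_from(b"Sibling".to_vec()),
    symbol: BoundedVec::truncate_from(b"SBL".to_vec()),
    existential_deposit: 1_000_000,
    location: Some(VersionedLocation::V4(Location::new(1, Parachain(2001)))),
    additional: CustomMetadata {
        fee_per_second: Some(400_000_000_000),
        conversion_rate: None,
    },
};
// Passing `None` lets SequentialId pick the next asset id.
AssetRegistry::register_asset(RuntimeOrigin::root(), metadata, None)?;
```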
+
+// parameter_types! {
+//     pub const CouncilMotionDuration: BlockNumber = 3 * DAYS;
+//     pub MaxProposalWeight: Weight = Perbill::from_percent(50) * RuntimeBlockWeights::get().max_block;
+// }
+
+// type CouncilCollective = pallet_collective::Instance1;
+// impl pallet_collective::Config<CouncilCollective> for Runtime {
+//     type RuntimeOrigin = RuntimeOrigin;
+//     type Proposal = RuntimeCall;
+//     type RuntimeEvent = RuntimeEvent;
+//     type MotionDuration = CouncilMotionDuration;
+//     type MaxProposals = ConstU32<100>;
+//     type MaxMembers = ConstU32<100>;
+//     type DefaultVote = pallet_collective::PrimeDefaultVote;
+//     type WeightInfo = pallet_collective::weights::SubstrateWeight<Runtime>;
+//     type SetMembersOrigin = EnsureRoot<AccountId>;
+//     type MaxProposalWeight = MaxProposalWeight;
+// }
+
+// type MoreThanHalfCouncil = EitherOfDiverse<
+//     EnsureRoot<AccountId>,
+//     pallet_collective::EnsureProportionMoreThan,
+// >;
+
+// impl pallet_membership::Config for Runtime {
+//     type RuntimeEvent = RuntimeEvent;
+//     type AddOrigin = MoreThanHalfCouncil;
+//     type RemoveOrigin = MoreThanHalfCouncil;
+//     type SwapOrigin = MoreThanHalfCouncil;
+//     type ResetOrigin = MoreThanHalfCouncil;
+//     type PrimeOrigin = MoreThanHalfCouncil;
+//     type MembershipInitialized = TechnicalCommittee;
+//     type MembershipChanged = TechnicalCommittee;
+//     type MaxMembers = ConstU32<100>;
+//     type WeightInfo = pallet_membership::weights::SubstrateWeight<Runtime>;
+// }
+
+// impl pallet_valve::Config for Runtime {
+//     type RuntimeEvent = RuntimeEvent;
+//     type WeightInfo = pallet_valve::weights::SubstrateWeight<Runtime>;
+//     type ClosedCallFilter = ClosedCallFilter;
+//     type AutomationTime = AutomationTime;
+//     // type AutomationPrice = AutomationPrice;
+//     type CallAccessFilter = TechnicalMembership;
+// }
+
+pub struct ScheduleAllowList;
+impl Contains<RuntimeCall> for ScheduleAllowList {
+    fn contains(c: &RuntimeCall) -> bool {
+        match c {
+            RuntimeCall::System(_) => true,
+            RuntimeCall::Balances(_) => true,
+            // RuntimeCall::ParachainStaking(_) => true,
+            RuntimeCall::XTokens(_) => true,
+            RuntimeCall::Utility(_) => true,
+            RuntimeCall::Currencies(_) => true,
+            _ => false,
+        }
+    }
+}
+
+pub struct AutomationEnsureProxy;
+impl EnsureProxy for AutomationEnsureProxy {
+    fn ensure_ok(delegator: AccountId, delegatee: AccountId) -> Result<(), &'static str> {
+        // We only allow "Any" proxies
+        let def: pallet_proxy::ProxyDefinition<AccountId, ProxyType, BlockNumber> =
+            pallet_proxy::Pallet::<Runtime>::find_proxy(
+                &delegator,
+                &delegatee,
+                Some(ProxyType::Any),
+            )
+            .map_err(|_| "proxy error: expected `ProxyType::Any`")?;
+        // We only allow zero-delay proxies, as the call will immediately be executed
+        ensure!(def.delay.is_zero(), "proxy delay is non-zero");
+        Ok(())
+    }
+}
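`AutomationEnsureProxy` is what lets a task be scheduled on behalf of another account. A sketch of the check it performs (reviewer illustration; `alice`/`bob` are hypothetical accounts, and `bob` must already hold a zero-delay `ProxyType::Any` proxy from `alice`):

```rust
// The automation pallets run this check before honouring a `schedule_as` account.
match AutomationEnsureProxy::ensure_ok(alice.clone(), bob.clone()) {
    // A zero-delay Any-proxy exists: bob may schedule tasks as alice.
    Ok(()) => {}
    // Otherwise the task is rejected with a static error string, e.g.
    // "proxy error: expected `ProxyType::Any`" or "proxy delay is non-zero".
    Err(reason) => log::warn!("schedule_as rejected: {}", reason),
}
```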
+
+impl pallet_automation_time::Config for Runtime {
+    type RuntimeEvent = RuntimeEvent;
+    type MaxTasksPerSlot = ConstU32<256>;
+    type MaxExecutionTimes = ConstU32<36>;
+    type MaxScheduleSeconds = MaxScheduleSeconds;
+    type MaxBlockWeight = MaxBlockWeight;
+    type MaxWeightPercentage = MaxWeightPercentage;
+    // Roughly .125% of parachain block weight per hour
+    // ≈ 500_000_000_000 (MaxBlockWeight) * 300 (Blocks/Hour) * .00125
+    type MaxWeightPerSlot = ConstU128<150_000_000_000>;
+    type SlotSizeSeconds = SlotSizeSeconds;
+    type UpdateQueueRatio = UpdateQueueRatio;
+    type WeightInfo = pallet_automation_time::weights::SubstrateWeight<Runtime>;
+    type ExecutionWeightFee = ExecutionWeightFee;
+    type Currency = Balances;
+    type MultiCurrency = Currencies;
+    type CurrencyId = TokenId;
+    type XcmpTransactor = XcmpHandler;
+    type FeeHandler = pallet_automation_time::FeeHandler<Runtime, ToTreasury>;
+    type CurrencyIdConvert = TokenIdConvert;
+    type FeeConversionRateProvider = FeePerSecondProvider;
+    type RuntimeCall = RuntimeCall;
+    type ScheduleAllowList = ScheduleAllowList;
+    type EnsureProxy = AutomationEnsureProxy;
+    type UniversalLocation = UniversalLocation;
+    type ReserveProvider = AbsoluteAndRelativeReserveProvider<SelfLocationAbsolute>;
+    type SelfLocation = SelfLocationAbsolute;
+}
+
+impl pallet_automation_price::Config for Runtime {
+    type RuntimeEvent = RuntimeEvent;
+    type MaxTasksPerSlot = ConstU32<1>;
+    type MaxTasksPerAccount = ConstU32<32>;
+    type MaxTasksOverall = ConstU32<16_384>;
+    type MaxBlockWeight = MaxBlockWeight;
+    type MaxWeightPercentage = MaxWeightPercentage;
+    type WeightInfo = pallet_automation_price::weights::SubstrateWeight<Runtime>;
+    type ExecutionWeightFee = ExecutionWeightFee;
+    type Currency = Balances;
+    type MultiCurrency = Currencies;
+    type CurrencyId = TokenId;
+    type XcmpTransactor = XcmpHandler;
+    type EnsureProxy = AutomationEnsureProxy;
+    type CurrencyIdConvert = TokenIdConvert;
+    type FeeConversionRateProvider = FeePerSecondProvider;
+    type FeeHandler = pallet_automation_price::FeeHandler<Runtime, ToTreasury>;
+    type UniversalLocation = UniversalLocation;
+    type SelfParaId = parachain_info::Pallet<Runtime>;
+}
+
 impl_tanssi_pallets_config!(Runtime);
 
 // Create the runtime by composing the FRAME pallets that were previously configured.
@@ -676,6 +879,8 @@ construct_runtime!(
         Migrations: pallet_migrations = 7,
         MaintenanceMode: pallet_maintenance_mode = 8,
         TxPause: pallet_tx_pause = 9,
+        // TechnicalCommittee: pallet_collective::<Instance1>::{Pallet, Call, Storage, Event<T>, Origin<T>, Config<T>} = 10,
+        // TechnicalMembership: pallet_membership::<Instance1>::{Pallet, Call, Storage, Event<T>, Config<T>} = 11,
 
         // Monetary stuff.
         Balances: pallet_balances = 10,
@@ -702,6 +907,16 @@ construct_runtime!(
         RootTesting: pallet_root_testing = 100,
         AsyncBacking: pallet_async_backing::{Pallet, Storage} = 110,
+
+        // ORML related pallets
+        AssetRegistry: orml_asset_registry::module = 201,
+        XTokens: orml_xtokens = 202,
+        Tokens: orml_tokens = 203,
+        Currencies: orml_currencies = 204,
+
+        // Custom pallets
+        XcmpHandler: pallet_xcmp_handler = 205,
+        AutomationTime: pallet_automation_time = 206,
+        AutomationPrice: pallet_automation_price = 207,
     }
 );
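One arithmetic note on `MaxWeightPerSlot` in the `pallet_automation_time` config above: the inline comment says roughly 0.125% of hourly block weight, but the configured constant works out to 0.1%. A worked check (reviewer note):

```rust
// MaxBlockWeight * blocks per hour = total ref-time per hour.
const HOURLY_WEIGHT: u128 = 500_000_000_000 * 300; // = 150_000_000_000_000
// The configured MaxWeightPerSlot:
const MAX_WEIGHT_PER_SLOT: u128 = 150_000_000_000;
// 150e9 / 150e12 = 0.001, i.e. 0.1% (0.125% would be 187_500_000_000).
const _: () = assert!(MAX_WEIGHT_PER_SLOT * 1_000 == HOURLY_WEIGHT);
```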
diff --git a/container-chains/runtime-templates/simple/src/weights/asset_registry_weights.rs b/container-chains/runtime-templates/simple/src/weights/asset_registry_weights.rs
new file mode 100644
index 000000000..a4671cbf8
--- /dev/null
+++ b/container-chains/runtime-templates/simple/src/weights/asset_registry_weights.rs
@@ -0,0 +1,109 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for orml_asset_registry
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2022-09-13, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! HOSTNAME: `Lauras-MacBook-Pro.local`, CPU: ``
+//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("turing-dev"), DB CACHE: 1024
+
+// Executed Command:
+// target/release/container-chain-simple-node
+// benchmark
+// pallet
+// --chain
+// turing-dev
+// --execution
+// wasm
+// --wasm-execution
+// compiled
+// --pallet
+// orml_asset_registry
+// --extrinsic
+// *
+// --repeat
+// 20
+// --steps
+// 50
+// --output
+// raw-weights.rs
+// --template
+// ./.maintain/frame-weight-template.hbs
+
+// Summary:
+//:register_asset 25_000_000
+//:update_asset 20_000_000
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use sp_std::marker::PhantomData;
+
+/// Weight functions needed for orml_asset_registry.
+pub trait WeightInfo {
+    fn register_asset() -> Weight;
+    fn update_asset() -> Weight;
+    fn set_asset_location() -> Weight;
+}
+
+/// Weights for orml_asset_registry using the Substrate node and recommended hardware.
+pub struct SubstrateWeight<T>(PhantomData<T>);
+impl<T: frame_system::Config> orml_asset_registry::WeightInfo for SubstrateWeight<T> {
+    // Storage: AssetRegistry LastAssetId (r:1 w:1)
+    // Storage: AssetRegistry Metadata (r:1 w:1)
+    // Storage: AssetRegistry LocationToAssetId (r:1 w:1)
+    fn register_asset() -> Weight {
+        Weight::from_parts(25_000_000_u64, 0u64)
+            .saturating_add(T::DbWeight::get().reads(3_u64))
+            .saturating_add(T::DbWeight::get().writes(3_u64))
+    }
+    // Storage: AssetRegistry Metadata (r:1 w:1)
+    fn update_asset() -> Weight {
+        Weight::from_parts(20_000_000_u64, 0u64)
+            .saturating_add(T::DbWeight::get().reads(1_u64))
+            .saturating_add(T::DbWeight::get().writes(1_u64))
+    }
+    // Weight not used in pallet
+    fn set_asset_location() -> Weight {
+        Weight::zero()
+    }
+}
+
+// For backwards compatibility and tests
+impl WeightInfo for () {
+    // Storage: AssetRegistry LastAssetId (r:1 w:1)
+    // Storage: AssetRegistry Metadata (r:1 w:1)
+    // Storage: AssetRegistry LocationToAssetId (r:1 w:1)
+    fn register_asset() -> Weight {
+        Weight::from_parts(25_000_000_u64, 0u64)
+            .saturating_add(RocksDbWeight::get().reads(3_u64))
+            .saturating_add(RocksDbWeight::get().writes(3_u64))
+    }
+    // Storage: AssetRegistry Metadata (r:1 w:1)
+    fn update_asset() -> Weight {
+        Weight::from_parts(20_000_000_u64, 0u64)
+            .saturating_add(RocksDbWeight::get().reads(1_u64))
+            .saturating_add(RocksDbWeight::get().writes(1_u64))
+    }
+    // Weight not used in pallet
+    fn set_asset_location() -> Weight {
+        Weight::zero()
+    }
+}
diff --git a/container-chains/runtime-templates/simple/src/weights/mod.rs b/container-chains/runtime-templates/simple/src/weights/mod.rs
index 3bc8f2350..e42f4e752 100644
--- a/container-chains/runtime-templates/simple/src/weights/mod.rs
+++ b/container-chains/runtime-templates/simple/src/weights/mod.rs
@@ -37,3 +37,5 @@ pub mod pallet_utility;
 pub mod pallet_xcm;
 pub mod pallet_xcm_executor_utils;
 pub mod xcm;
+
+pub mod asset_registry_weights;
diff --git a/container-chains/runtime-templates/simple/src/xcm_config.rs b/container-chains/runtime-templates/simple/src/xcm_config.rs
index 43eae56ac..ddceb4ef7 100644
--- a/container-chains/runtime-templates/simple/src/xcm_config.rs
+++ b/container-chains/runtime-templates/simple/src/xcm_config.rs
@@ -18,18 +18,25 @@ use {
     super::{
         currency::MICROUNIT,
         weights::{self, xcm::XcmWeight as XcmGenericWeights},
-        AccountId, AllPalletsWithSystem, AssetRate, Balance, Balances, ForeignAssetsCreator,
-        MaintenanceMode, MessageQueue, ParachainInfo, ParachainSystem, PolkadotXcm, Runtime,
-        RuntimeBlockWeights, RuntimeCall, RuntimeEvent, RuntimeOrigin, TransactionByteFee,
-        WeightToFee, XcmpQueue,
+        AccountId, AllPalletsWithSystem, AssetRate, Balance, Balances, BlockNumber, Currencies,
+        ForeignAssetsCreator, MaintenanceMode, MaxLocks, MaxReserves, MessageQueue, ParachainInfo,
+        ParachainSystem, PolkadotXcm, Runtime, RuntimeBlockWeights, RuntimeCall, RuntimeEvent,
+        RuntimeOrigin, TokenId, Tokens, TransactionByteFee, TreasuryPalletId, WeightToFee,
+        XcmpQueue, NATIVE_TOKEN_ID,
     },
+    ava_protocol_primitives::{AbsoluteAndRelativeReserveProvider, Amount},
+    common_runtime::CurrencyHooks,
     cumulus_primitives_core::{AggregateMessageOrigin, ParaId},
     frame_support::{
         parameter_types,
-        traits::{Everything, Nothing, PalletInfoAccess, TransformOrigin},
+        traits::{Contains, Everything, Nothing, PalletInfoAccess, TransformOrigin},
         weights::Weight,
     },
     frame_system::EnsureRoot,
+    orml_traits::{
+        asset_registry::Inspect, parameter_type_with_key, FixedConversionRateProvider,
+        MultiCurrency,
+    },
     pallet_xcm::XcmPassthrough,
     pallet_xcm_executor_utils::{
         filters::{IsReserveFilter, IsTeleportFilter},
@@ -38,15 +45,18 @@ use {
     parachains_common::message_queue::{NarrowOriginToSibling, ParaIdToSibling},
     polkadot_runtime_common::xcm_sender::ExponentialPrice,
     sp_core::ConstU32,
-    sp_runtime::Perbill,
+    sp_runtime::{
+        traits::{AccountIdConversion, Convert},
+        Perbill, Percent,
+    },
     staging_xcm::latest::prelude::*,
     staging_xcm_builder::{
         AccountId32Aliases, AllowKnownQueryResponses, AllowSubscriptionsFrom,
-        AllowTopLevelPaidExecutionFrom, ConvertedConcreteId, EnsureXcmOrigin, FungibleAdapter,
-        IsConcrete, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative,
+        AllowTopLevelPaidExecutionFrom, ConvertedConcreteId, EnsureXcmOrigin, FixedWeightBounds,
+        FungibleAdapter, IsConcrete, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative,
         SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32,
-        SovereignSignedViaLocation, TakeWeightCredit, UsingComponents, WeightInfoBounds,
-        WithComputedOrigin,
+        SovereignSignedViaLocation, TakeRevenue, TakeWeightCredit, UsingComponents,
+        WeightInfoBounds, WithComputedOrigin,
     },
     staging_xcm_executor::XcmExecutor,
 };
@@ -430,3 +440,189 @@ impl pallet_xcm_executor_utils::Config for Runtime {
     type SetTeleportTrustOrigin = EnsureRoot<AccountId>;
     type WeightInfo = weights::pallet_xcm_executor_utils::SubstrateWeight<Runtime>;
 }
+
+pub struct ToTreasury;
+impl TakeRevenue for ToTreasury {
+    fn take_revenue(revenue: Asset) {
+        if let Asset {
+            id: AssetId(id),
+            fun: Fungibility::Fungible(amount),
+        } = revenue
+        {
+            if let Some(currency_id) = TokenIdConvert::convert(id) {
+                if currency_id == NATIVE_TOKEN_ID {
+                    // Deposit to native treasury account
+                    // 20% burned, 80% to the treasury
+                    let to_treasury = Percent::from_percent(80).mul_floor(amount);
+                    // Due to the way XCM works the amount has already been taken off the total allocation balance.
+                    // Thus whatever we deposit here gets added back to the total allocation, and the rest is burned.
+                    let _ = Currencies::deposit(currency_id, &TreasuryAccount::get(), to_treasury);
+                } else {
+                    // Deposit to foreign treasury account
+                    let _ = Currencies::deposit(
+                        currency_id,
+                        &TemporaryForeignTreasuryAccount::get(),
+                        amount,
+                    );
+                }
+            }
+        }
+    }
+}
+
+type AssetRegistryOf<T> = orml_asset_registry::module::Pallet<T>;
+
+pub struct FeePerSecondProvider;
+impl FixedConversionRateProvider for FeePerSecondProvider {
+    fn get_fee_per_second(location: &Location) -> Option<u128> {
+        let metadata = match location.interior.first() {
+            Some(Junction::Parachain(para_id))
+                if *para_id == u32::from(ParachainInfo::parachain_id()) =>
+            {
+                AssetRegistryOf::<Runtime>::metadata(NATIVE_TOKEN_ID)?
+            }
+            _ => AssetRegistryOf::<Runtime>::metadata_by_location(location)?,
+        };
+
+        metadata.additional.fee_per_second
+    }
+}
+
+// pub type Trader =
+//     (AssetRegistryTrader<FixedRateAssetRegistryTrader<FeePerSecondProvider>, ToTreasury>,);
+
+pub struct TokenIdConvert;
+impl Convert<TokenId, Option<Location>> for TokenIdConvert {
+    fn convert(id: TokenId) -> Option<Location> {
+        match AssetRegistryOf::<Runtime>::location(&id) {
+            Ok(Some(multi_location)) => {
+                let location: Location = Location::try_from(multi_location).unwrap();
+                Some(location)
+            }
+            _ => None,
+        }
+    }
+}
+
+impl Convert<Location, Option<TokenId>> for TokenIdConvert {
+    fn convert(location: Location) -> Option<TokenId> {
+        if let Some(Junction::Parachain(para_id)) = location.interior.first() {
+            if *para_id == u32::from(ParachainInfo::parachain_id()) {
+                return Some(NATIVE_TOKEN_ID);
+            }
+        }
+
+        AssetRegistryOf::<Runtime>::asset_id(&location)
+    }
+}
+
+impl Convert<Asset, Option<TokenId>> for TokenIdConvert {
+    fn convert(asset: Asset) -> Option<TokenId> {
+        let Asset {
+            id: AssetId(location),
+            ..
+        } = asset;
+        Self::convert(location)
+    }
+}
+
+pub struct AccountIdToMultiLocation;
+impl Convert<AccountId, Location> for AccountIdToMultiLocation {
+    fn convert(account: AccountId) -> Location {
+        AccountId32 {
+            network: None,
+            id: account.into(),
+        }
+        .into()
+    }
+}
+
+parameter_types! {
+    pub SelfLocation: Location = Here.into_location();
+    pub SelfLocationAbsolute: Location = Location::new(1, Parachain(ParachainInfo::parachain_id().into()));
+    pub const BaseXcmWeight: Weight = Weight::from_parts(100_000_000, 0);
+    pub const MaxAssetsForTransfer: usize = 1;
+}
+
+parameter_type_with_key! {
+    pub ParachainMinFee: |_location: Location| -> Option<u128> {
+        None
+    };
+}
+
+impl orml_xtokens::Config for Runtime {
+    type RuntimeEvent = RuntimeEvent;
+    type Balance = Balance;
+    type CurrencyId = TokenId;
+    type CurrencyIdConvert = TokenIdConvert;
+    type AccountIdToLocation = AccountIdToMultiLocation;
+    type SelfLocation = SelfLocation;
+    type XcmExecutor = XcmExecutor;
+    type Weigher = FixedWeightBounds;
+    type BaseXcmWeight = BaseXcmWeight;
+    type UniversalLocation = UniversalLocation;
+    type MaxAssetsForTransfer = MaxAssetsForTransfer;
+    // Default impl. Refer to `orml-xtokens` docs for more details.
+    type MinXcmFee = ParachainMinFee;
+    type LocationsFilter = Everything;
+    type ReserveProvider = AbsoluteAndRelativeReserveProvider<SelfLocationAbsolute>;
+    type RateLimiter = ();
+    type RateLimiterId = ();
+}
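Usage sketch for the `orml_xtokens` config above: transferring a registered token to a sibling chain. Reviewer illustration only; the token id, destination para id, beneficiary bytes, and the `sender: AccountId` binding are all hypothetical:

```rust
// Send 1 UNIT (10 decimals) of registered asset 1 to an account on para 2001.
let dest: VersionedLocation = Location::new(
    1,
    [
        Parachain(2001),
        AccountId32 { network: None, id: [0u8; 32] },
    ],
)
.into();
XTokens::transfer(
    RuntimeOrigin::signed(sender),
    1,              // CurrencyId = TokenId, resolved to a Location via TokenIdConvert
    10_000_000_000, // amount
    Box::new(dest),
    WeightLimit::Unlimited,
)?;
```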
+
+parameter_types! {
+    pub TreasuryAccount: AccountId = TreasuryPalletId::get().into_account_truncating();
+    // Until we can codify how to handle foreign tokens that we collect in XCMP fees
+    // we will send the tokens to a special account to be dealt with.
+    pub TemporaryForeignTreasuryAccount: AccountId = hex_literal::hex!["8acc2955e592588af0eeec40384bf3b498335ecc90df5e6980f0141e1314eb37"].into();
+}
+
+pub struct DustRemovalWhitelist;
+impl Contains<AccountId> for DustRemovalWhitelist {
+    fn contains(a: &AccountId) -> bool {
+        *a == TreasuryAccount::get() || *a == TemporaryForeignTreasuryAccount::get()
+    }
+}
+
+impl orml_tokens::Config for Runtime {
+    type RuntimeEvent = RuntimeEvent;
+    type Balance = Balance;
+    type Amount = Amount;
+    type CurrencyId = TokenId;
+    type WeightInfo = ();
+    type ExistentialDeposits = orml_asset_registry::ExistentialDeposits<Runtime>;
+    type CurrencyHooks = CurrencyHooks<Runtime, TreasuryAccount>;
+    type MaxLocks = MaxLocks;
+    type MaxReserves = MaxReserves;
+    type ReserveIdentifier = [u8; 8];
+    type DustRemovalWhitelist = DustRemovalWhitelist;
+}
+
+parameter_types! {
+    pub const GetNativeCurrencyId: TokenId = NATIVE_TOKEN_ID;
+}
+
+impl orml_currencies::Config for Runtime {
+    type MultiCurrency = Tokens;
+    type NativeCurrency =
+        orml_currencies::BasicCurrencyAdapter<Runtime, Balances, Amount, BlockNumber>;
+    type GetNativeCurrencyId = GetNativeCurrencyId;
+    type WeightInfo = ();
+}
+
+impl pallet_xcmp_handler::Config for Runtime {
+    type RuntimeEvent = RuntimeEvent;
+    type RuntimeCall = RuntimeCall;
+    type MultiCurrency = Currencies;
+    type CurrencyId = TokenId;
+    type GetNativeCurrencyId = GetNativeCurrencyId;
+    type SelfParaId = parachain_info::Pallet<Runtime>;
+    type AccountIdToLocation = AccountIdToMultiLocation;
+    type CurrencyIdToLocation = TokenIdConvert;
+    type UniversalLocation = UniversalLocation;
+    type XcmSender = XcmRouter;
+    type XcmExecutor = XcmExecutor;
+    type Weigher = FixedWeightBounds;
+    type ReserveProvider = AbsoluteAndRelativeReserveProvider<SelfLocationAbsolute>;
+    type SelfLocation = SelfLocationAbsolute;
+}
diff --git a/pallets/automation-price/Cargo.toml b/pallets/automation-price/Cargo.toml
new file mode 100644
index 000000000..eef33dc2c
--- /dev/null
+++ b/pallets/automation-price/Cargo.toml
@@ -0,0 +1,129 @@
+[package]
+name = "pallet-automation-price"
+authors = [ "Ava Protocol Team" ]
+description = "Pallet for scheduling and running tasks in the future."
+edition = "2021"
+homepage = "https://avaprotocol.org"
+license = "GPL-3.0"
+readme = "README.md"
+repository = "https://github.com/AvaProtocol/tanssi-integration"
+version = "0.1.0"
+
+[package.metadata.docs.rs]
+targets = [ "x86_64-unknown-linux-gnu" ]
+
+[dependencies]
+log = { workspace = true }
+parity-scale-codec = { workspace = true, features = [ "derive" ] }
+scale-info = { workspace = true, features = [ "derive" ] }
+
+# Polkadot
+polkadot-parachain-primitives = { workspace = true }
+staging-xcm = { workspace = true }
+
+# Cumulus dependencies
+cumulus-pallet-xcm = { workspace = true }
+cumulus-primitives-core = { workspace = true }
+
+## ORML
+orml-currencies = { workspace = true }
+orml-traits = { workspace = true }
+
+# Substrate Dependencies
+## Substrate Primitive Dependencies
+sp-runtime = { workspace = true }
+sp-std = { workspace = true }
+
+## Substrate FRAME Dependencies
+frame-benchmarking = { workspace = true, optional = true }
+frame-support = { workspace = true }
+frame-system = { workspace = true }
+
+## Substrate Pallet Dependencies
+pallet-timestamp = { workspace = true }
+
+## Polkadot deps
+staging-xcm-builder = { workspace = true }
+
+## Local
+ava-protocol-primitives = { workspace = true }
+pallet-xcmp-handler = { workspace = true }
+
+[dev-dependencies]
+pallet-balances = { workspace = true }
+rand = { workspace = true }
+serde = { workspace = true }
+sp-core = { workspace = true }
+sp-io = { workspace = true }
+
+pallet-xcm = { workspace = true }
+staging-xcm-executor = { workspace = true }
+
+# Cumulus dependencies
+parachain-info = { workspace = true }
+
+# ORML
+orml-currencies = { workspace = true }
+orml-tokens = { workspace = true }
+
+[features]
+default = [ "std" ]
+std = [
    "ava-protocol-primitives/std",
    "cumulus-pallet-xcm/std",
    "cumulus-primitives-core/std",
    "frame-benchmarking/std",
    "frame-support/std",
    "frame-system/std",
    "log/std",
    "orml-currencies/std",
    "orml-tokens/std",
    "orml-traits/std",
    "pallet-balances/std",
    "pallet-timestamp/std",
    "pallet-xcm/std",
    "pallet-xcmp-handler/std",
    "parachain-info/std",
    "parity-scale-codec/std",
    "polkadot-parachain-primitives/std",
    "rand/std",
    "scale-info/std",
    "serde/std",
    "sp-core/std",
    "sp-io/std",
    "sp-runtime/std",
    "sp-std/std",
    "staging-xcm-builder/std",
    "staging-xcm-executor/std",
    "staging-xcm/std",
+]
+dev-queue = []
+runtime-benchmarks = [
    "cumulus-primitives-core/runtime-benchmarks",
    "frame-benchmarking/runtime-benchmarks",
    "frame-support/runtime-benchmarks",
    "frame-system/runtime-benchmarks",
    "orml-tokens/runtime-benchmarks",
    "pallet-balances/runtime-benchmarks",
    "pallet-timestamp/runtime-benchmarks",
    "pallet-xcm/runtime-benchmarks",
    "pallet-xcmp-handler/runtime-benchmarks",
    "polkadot-parachain-primitives/runtime-benchmarks",
    "sp-runtime/runtime-benchmarks",
    "staging-xcm-builder/runtime-benchmarks",
    "staging-xcm-executor/runtime-benchmarks",
+]
+try-runtime = [
    "cumulus-pallet-xcm/try-runtime",
    "frame-support/try-runtime",
    "frame-system/try-runtime",
    "orml-currencies/try-runtime",
    "orml-tokens/try-runtime",
    "pallet-balances/try-runtime",
    "pallet-timestamp/try-runtime",
    "pallet-xcm/try-runtime",
    "pallet-xcmp-handler/try-runtime",
    "parachain-info/try-runtime",
    "sp-runtime/try-runtime",
+]
diff --git a/pallets/automation-price/src/benchmarking.rs b/pallets/automation-price/src/benchmarking.rs
new file mode 100644
index 000000000..5bb52d78f
--- /dev/null
+++ b/pallets/automation-price/src/benchmarking.rs
@@ -0,0 +1,340 @@
+// This file is part of Ava Protocol.
+
+// Copyright (C) 2022 Ava Protocol
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#![cfg(feature = "runtime-benchmarks")]
+
+use super::*;
+use frame_benchmarking::{account, benchmarks};
+use frame_system::RawOrigin;
+
+use polkadot_parachain_primitives::primitives::Sibling;
+use sp_runtime::traits::{AccountIdConversion, Saturating};
+
+use staging_xcm::latest::prelude::*;
+
+use crate::{
+    pallet::{Task, TaskId},
+    Config, Pallet as AutomationPrice,
+};
+
+const SEED: u32 = 0;
+// existential deposit multiplier
+const ED_MULTIPLIER: u32 = 1_000;
+// ensure enough funds to execute tasks
+const DEPOSIT_MULTIPLIER: u32 = 100_000_000;
+
+const CHAIN: &[u8] = "CHAIN".as_bytes();
+const EXCHANGE: &[u8] = "EXCHANGE".as_bytes();
+const ASSET_TUR: &[u8] = "TUR".as_bytes();
+const ASSET_USD: &[u8] = "USD".as_bytes();
+const DECIMAL: u8 = 10_u8;
+
+// a helper function to prepare the asset when setting up tasks or prices, because an asset
+// needs to be defined before its price can be updated
+fn setup_asset<T: Config>(authorized_wallets: Vec<T::AccountId>) {
+    let _ = AutomationPrice::<T>::initialize_asset(
+        RawOrigin::Root.into(),
+        CHAIN.to_vec(),
+        EXCHANGE.to_vec(),
+        ASSET_TUR.to_vec(),
+        ASSET_USD.to_vec(),
+        DECIMAL,
+        authorized_wallets,
+    );
+}
+
+// a helper to schedule a task with a set of default params, to keep the benchmarks simple
+fn schedule_xcmp_task<T: Config>(para_id: u32, owner: T::AccountId, call: Vec<u8>) {
+    let _ = AutomationPrice::<T>::schedule_xcmp_task(
+        RawOrigin::Signed(owner).into(),
+        CHAIN.to_vec(),
+        EXCHANGE.to_vec(),
+        ASSET_TUR.to_vec(),
+        ASSET_USD.to_vec(),
+        6000u128,
+        "gt".as_bytes().to_vec(),
+        vec![2000],
+        Box::new(Location::new(1, Parachain(para_id)).into()),
+        Box::new(Location::default().into()),
+        Box::new(AssetPayment {
+            asset_location: Location::new(1, Parachain(para_id)).into(),
+            amount: 0,
+        }),
+        call,
+        Weight::from_parts(100_000, 0),
+        Weight::from_parts(200_000, 0),
+    );
+}
+
+// direct_task_schedule pushes the task directly into the task registry with the relevant setup,
+// bypassing normal extrinsic execution.
+// This function should be used to prepare data for benchmarks.
+fn direct_task_schedule<T: Config>(
+    creator: T::AccountId,
+    task_id: TaskId,
+    expired_at: u128,
+    trigger_function: Vec<u8>,
+    price_target: u128,
+    encoded_call: Vec<u8>,
+) -> Result<(), Error<T>> {
+    let para_id: u32 = 2000;
+    let destination = Location::new(1, Parachain(para_id));
+    let schedule_fee = Location::default();
+    let execution_fee = AssetPayment {
+        asset_location: Location::new(1, Parachain(para_id)).into(),
+        amount: 0,
+    };
+    let encoded_call_weight = Weight::from_parts(100_000, 0);
+    let overall_weight = Weight::from_parts(200_000, 0);
+    let schedule_as = account("caller", 0, SEED);
+
+    let action = Action::XCMP {
+        destination,
+        schedule_fee,
+        execution_fee,
+        encoded_call,
+        encoded_call_weight,
+        overall_weight,
+        schedule_as: Some(schedule_as),
+        instruction_sequence: InstructionSequence::PayThroughRemoteDerivativeAccount,
+    };
+
+    let task: Task<T> = Task::<T> {
+        owner_id: creator,
+        task_id,
+        chain: CHAIN.to_vec(),
+        exchange: EXCHANGE.to_vec(),
+        asset_pair: (ASSET_TUR.to_vec(), ASSET_USD.to_vec()),
+        expired_at,
+        trigger_function,
+        trigger_params: vec![price_target],
+        action,
+    };
+
+    AutomationPrice::<T>::validate_and_schedule_task(task)
+}
+
+benchmarks! {
+    initialize_asset_extrinsic {
+        let v in 1..5;
+        let asset_pair = (ASSET_TUR.to_vec(), ASSET_USD.to_vec());
+
+        let mut authorized_wallets: Vec<T::AccountId> = vec![];
+        for i in 1..=v {
+            authorized_wallets.push(account("caller", i, SEED));
+        }
+    } : {
+        let _ = AutomationPrice::<T>::initialize_asset(
+            RawOrigin::Root.into(),
+            CHAIN.to_vec(), EXCHANGE.to_vec(),
+            ASSET_TUR.to_vec(), ASSET_USD.to_vec(), DECIMAL, authorized_wallets);
+    }
+
+    asset_price_update_extrinsic {
+        // The weight depends on the size of the input and should ideally scale linearly.
+        // We therefore simulate v from 1..100 and Substrate will aggregate those values.
+        let v in 1..100;
+        let sender : T::AccountId = account("caller", 0, SEED);
+
+        setup_asset::<T>(vec![sender.clone()]);
+
+        let mut chains: Vec<Vec<u8>> = vec![];
+        let mut exchanges: Vec<Vec<u8>> = vec![];
+        let mut assets1: Vec<Vec<u8>> = vec![];
+        let mut assets2: Vec<Vec<u8>> = vec![];
+        let mut prices: Vec<u128> = vec![];
+        let mut submitted_ats: Vec<u128> = vec![];
+        let mut rounds: Vec<u128> = vec![];
+
+        for i in 1..=v {
+            chains.push(format!("CHAIN:{:?}", i).as_bytes().to_vec());
+            exchanges.push(format!("EXCHANGE:{:?}", i).as_bytes().to_vec());
+            assets1.push(format!("ASSET1{:?}", i).as_bytes().to_vec());
+            assets2.push(format!("ASSET2{:?}", i).as_bytes().to_vec());
+            prices.push(i as u128);
+            submitted_ats.push(i as u128);
+            rounds.push(i as u128);
+        }
+    } : {
+        let _ = AutomationPrice::<T>::update_asset_prices(
+            RawOrigin::Signed(sender.clone()).into(),
+            chains,
+            exchanges,
+            assets1,
+            assets2,
+            prices,
+            submitted_ats,
+            rounds
+        );
+    }
+
+    schedule_xcmp_task_extrinsic {
+        let sender : T::AccountId = account("caller", 0, SEED);
+        let para_id: u32 = 1000;
+        let call: Vec<u8> = vec![2, 4, 5];
+        setup_asset::<T>(vec![sender.clone()]);
+        let transfer_amount = T::Currency::minimum_balance().saturating_mul(ED_MULTIPLIER.into());
+        let _ = T::Currency::deposit_creating(
+            &sender,
+            transfer_amount.saturating_mul(DEPOSIT_MULTIPLIER.into()),
+        );
+
+    } : {
+        schedule_xcmp_task::<T>(para_id, sender, call);
+    }
+        let _ = T::Currency::deposit_creating(
+            &creator,
+            transfer_amount.saturating_mul(DEPOSIT_MULTIPLIER.into()),
+        );
+
+        // Schedule 100 tasks; an arbitrary number to simulate a big task registry.
+        // Because we use a StorageMap, and avoid dealing with vectors,
+        // our task lookup will always be O(1) in time.
+        let mut task_ids: Vec<TaskId> = vec![];
+        for i in 1..100 {
+            // Fund the account so we can schedule tasks
+            let account_min = T::Currency::minimum_balance().saturating_mul(ED_MULTIPLIER.into());
+            let _ = T::Currency::deposit_creating(&creator, account_min.saturating_mul(DEPOSIT_MULTIPLIER.into()));
+            let _ = direct_task_schedule::<T>(creator.clone(), format!("{:?}", i).as_bytes().to_vec(), i, "gt".as_bytes().to_vec(), i, vec![100, 200, (i % 256) as u8]);
+            task_ids.push(format!("{:?}", i).as_bytes().to_vec());
+        }
+
+        let task_id_to_cancel = "1".as_bytes().to_vec();
+    } : {
+        let _ = AutomationPrice::<T>::cancel_task(RawOrigin::Signed(creator).into(), task_id_to_cancel.clone());
+    }
+    verify {
+    }
+
+    run_xcmp_task {
+        let creator: T::AccountId = account("caller", 0, SEED);
+        let para_id: u32 = 2001;
+        let call = vec![4,5,6];
+
+        let local_para_id: u32 = 2114;
+        let destination = Location::new(1, Parachain(para_id));
+        let local_sovereign_account: T::AccountId = Sibling::from(local_para_id).into_account_truncating();
+        let _ = T::Currency::deposit_creating(
+            &local_sovereign_account,
+            T::Currency::minimum_balance().saturating_mul(DEPOSIT_MULTIPLIER.into()),
+        );
+
+        let fee = AssetPayment { asset_location: Location::new(1, Parachain(para_id)).into(), amount: 1000u128 };
+    }: {
+        AutomationPrice::<T>::run_xcmp_task(destination, creator, fee, call, Weight::from_parts(100_000, 0), Weight::from_parts(200_000, 0), InstructionSequence::PayThroughSovereignAccount)
+    }
+
+    remove_task {
+        let creator : T::AccountId = account("caller", 0, SEED);
+        let para_id: u32 = 1000;
+        let call: Vec<u8> = vec![2, 4, 5];
+        setup_asset::<T>(vec![creator.clone()]);
+        let transfer_amount = T::Currency::minimum_balance().saturating_mul(ED_MULTIPLIER.into());
+        let _ = T::Currency::deposit_creating(
+            &creator,
+            transfer_amount.saturating_mul(DEPOSIT_MULTIPLIER.into()),
+        );
+
+        let para_id: u32 = 2000;
+        let destination = Location::new(1, Parachain(para_id));
+        let schedule_fee = Location::default();
+        let execution_fee = AssetPayment {
+            asset_location: Location::new(1, Parachain(para_id)).into(),
+            amount: 0,
+        };
+        let encoded_call_weight = Weight::from_parts(100_000, 0);
+        let overall_weight = Weight::from_parts(200_000, 0);
+        let schedule_as: T::AccountId = account("caller", 0, SEED);
+
+        // Schedule 100 tasks; an arbitrary number to simulate a big task registry.
+        // Because we use a StorageMap, and avoid dealing with vectors,
+        // our task lookup will always be O(1) in time.
+        let mut task_ids: Vec<TaskId> = vec![];
+        let mut tasks: Vec<Task<T>> = vec![];
+        for i in 1..100 {
+            let task_id = format!("{:?}", i).as_bytes().to_vec();
+            let expired_at = i;
+            let trigger_function = "gt".as_bytes().to_vec();
+            let price_target: u128 = i;
+            let encoded_call = vec![100, 200, (i % 256) as u8];
+
+            task_ids.push(format!("{:?}", i).as_bytes().to_vec());
+            let action = Action::XCMP {
+                destination: destination.clone(),
+                schedule_fee: schedule_fee.clone(),
+                execution_fee: execution_fee.clone(),
+                encoded_call,
+                encoded_call_weight,
+                overall_weight,
+                schedule_as: Some(schedule_as.clone()),
+                instruction_sequence: InstructionSequence::PayThroughRemoteDerivativeAccount,
+            };
+
+            let task: Task<T> = Task::<T> {
+                owner_id: creator.clone(),
+                task_id: task_id.clone(),
+                chain: CHAIN.to_vec(),
+                exchange: EXCHANGE.to_vec(),
+                asset_pair: (ASSET_TUR.to_vec(), ASSET_USD.to_vec()),
+                expired_at,
+                trigger_function,
+                trigger_params: vec![price_target],
+                action,
+            };
+            let _ = AutomationPrice::<T>::validate_and_schedule_task(task.clone());
+            tasks.push(task);
+        }
+
+        let task = tasks.pop().unwrap();
+    }: {
+        // remove a task at the end to simulate the worst case
+        AutomationPrice::<T>::remove_task(&task, Some(crate::Event::<T>::TaskSweep {
+            owner_id: task.owner_id.clone(),
+            task_id: task.task_id.clone(),
+            condition: crate::TaskCondition::AlreadyExpired {
+                expired_at: task.expired_at,
+                now: 100,
+            }
+        }));
+    }
+
+
+    emit_event {
+        let owner_id: T::AccountId = account("call", 1, SEED);
+        let schedule_as: T::AccountId = account("schedule_as", 1, SEED);
+        let task_id: TaskId = vec![1,2,3];
+    } : {
+        AutomationPrice::<T>::deposit_event(crate::Event::<T>::TaskScheduled {
+            owner_id,
+            task_id,
+            schedule_as: Some(schedule_as),
+        });
+    }
+
+    impl_benchmark_test_suite!(
+        AutomationPrice,
+        crate::mock::new_test_ext(crate::tests::START_BLOCK_TIME),
+        crate::mock::Test
+    )
+}
diff --git a/pallets/automation-price/src/fees.rs b/pallets/automation-price/src/fees.rs
new file mode 100644
index 000000000..ebb859065
--- /dev/null
+++ b/pallets/automation-price/src/fees.rs
@@ -0,0 +1,164 @@
+// This file is part of Ava Protocol.
+
+// Copyright (C) 2022 Ava Protocol
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Traits and default implementation for paying execution fees.
+use crate::{AccountOf, Action, ActionOf, Config, Error, MultiBalanceOf, Pallet};
+
+use orml_traits::MultiCurrency;
+use pallet_xcmp_handler::{InstructionSequence, XcmpTransactor};
+use sp_runtime::{
+    traits::{CheckedSub, Convert, Saturating, Zero},
+    DispatchError, DispatchResult, SaturatedConversion,
+    TokenError::BelowMinimum,
+};
+use sp_std::marker::PhantomData;
+use staging_xcm::latest::prelude::*;
+use staging_xcm_builder::TakeRevenue;
+
+/// Handle execution fee payments in the context of automation actions
+pub trait HandleFees<T: Config> {
+    fn pay_checked_fees_for<R, F: FnOnce() -> Result<R, DispatchError>>(
+        owner: &AccountOf<T>,
+        action: &ActionOf<T>,
+        prereq: F,
+    ) -> Result<R, DispatchError>;
+}
+
+pub struct FeeHandler<T: Config, TR> {
+    owner: T::AccountId,
+    pub schedule_fee_location: Location,
+    pub schedule_fee_amount: MultiBalanceOf<T>,
+    pub execution_fee_amount: MultiBalanceOf<T>,
+    _phantom_data: PhantomData<TR>,
+}
+
+impl<T, TR> HandleFees<T> for FeeHandler<T, TR>
+where
+    T: Config,
+    TR: TakeRevenue,
+{
+    fn pay_checked_fees_for<R, F: FnOnce() -> Result<R, DispatchError>>(
+        owner: &AccountOf<T>,
+        action: &ActionOf<T>,
+        prereq: F,
+    ) -> Result<R, DispatchError> {
+        let fee_handler = Self::new(owner, action)?;
+        fee_handler
+            .can_pay_fee()
+            .map_err(|_| Error::<T>::InsufficientBalance)?;
+        let outcome = prereq()?;
+        fee_handler.pay_fees()?;
+        Ok(outcome)
+    }
+}
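+// Illustrative call pattern for the trait above (a sketch; `owner` and `action` are
+// placeholders): the fee is only withdrawn after the `prereq` closure succeeds, which
+// is how the pallet makes task insertion and fee payment all-or-nothing.
+//
+//     T::FeeHandler::pay_checked_fees_for(&owner, &action, || {
+//         // e.g. insert the task into storage
+//         Ok(())
+//     })?;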
+impl<T, TR> FeeHandler<T, TR>
+where
+    T: Config,
+    TR: TakeRevenue,
+{
+    /// Ensure the fee can be paid.
+    fn can_pay_fee(&self) -> Result<(), DispatchError> {
+        let fee = self
+            .schedule_fee_amount
+            .saturating_add(self.execution_fee_amount);
+
+        if fee.is_zero() {
+            return Ok(());
+        }
+
+        // Manually check for ExistenceRequirement since MultiCurrency doesn't currently support it
+        let currency_id = T::CurrencyIdConvert::convert(self.schedule_fee_location.clone())
+            .ok_or("InconvertibleLocation")?;
+        let currency_id = currency_id.into();
+        let free_balance = T::MultiCurrency::free_balance(currency_id, &self.owner);
+
+        free_balance
+            .checked_sub(&fee)
+            .ok_or(DispatchError::Token(BelowMinimum))?
+            .checked_sub(&T::MultiCurrency::minimum_balance(currency_id))
+            .ok_or(DispatchError::Token(BelowMinimum))?;
+        T::MultiCurrency::ensure_can_withdraw(currency_id, &self.owner, fee)?;
+        Ok(())
+    }
+
+    /// Withdraw the fee.
+    fn withdraw_fee(&self) -> Result<(), DispatchError> {
+        let fee = self
+            .schedule_fee_amount
+            .saturating_add(self.execution_fee_amount);
+
+        if fee.is_zero() {
+            return Ok(());
+        }
+
+        let currency_id = T::CurrencyIdConvert::convert(self.schedule_fee_location.clone())
+            .ok_or("InconvertibleLocation")?;
+
+        match T::MultiCurrency::withdraw(currency_id.into(), &self.owner, fee) {
+            Ok(_) => {
+                TR::take_revenue(Asset {
+                    id: self.schedule_fee_location.clone().into(),
+                    fun: Fungibility::Fungible(self.schedule_fee_amount.saturated_into()),
+                });
+
+                if self.execution_fee_amount > MultiBalanceOf::<T>::zero() {
+                    T::XcmpTransactor::pay_xcm_fee(
+                        currency_id,
+                        self.owner.clone(),
+                        self.execution_fee_amount.saturated_into(),
+                    )?;
+                }
+
+                Ok(())
+            }
+            Err(_) => Err(DispatchError::Token(BelowMinimum)),
+        }
+    }
+
+    /// Builds an instance of the struct
+    pub fn new(owner: &AccountOf<T>, action: &ActionOf<T>) -> Result<Self, DispatchError> {
+        let schedule_fee_location = action.schedule_fee_location::<T>();
+
+        let schedule_fee_amount: u128 =
+            Pallet::<T>::calculate_schedule_fee_amount(action)?.saturated_into();
+
+        let execution_fee_amount = match action.clone() {
+            Action::XCMP {
+                execution_fee,
+                instruction_sequence: InstructionSequence::PayThroughSovereignAccount,
+                ..
+            } => execution_fee.amount.saturated_into(),
+            _ => 0u32.saturated_into(),
+        };
+
+        Ok(Self {
+            owner: owner.clone(),
+            schedule_fee_location,
+            schedule_fee_amount: schedule_fee_amount.saturated_into(),
+            execution_fee_amount,
+            _phantom_data: Default::default(),
+        })
+    }
+
+    /// Executes the fee handler
+    fn pay_fees(self) -> DispatchResult {
+        // This should never error if can_pay_fee passed.
+        self.withdraw_fee()
+            .map_err(|_| Error::<T>::LiquidityRestrictions)?;
+        Ok(())
+    }
+}
diff --git a/pallets/automation-price/src/lib.rs b/pallets/automation-price/src/lib.rs
new file mode 100644
index 000000000..a7eb93bfb
--- /dev/null
+++ b/pallets/automation-price/src/lib.rs
@@ -0,0 +1,1602 @@
+// This file is part of Ava Protocol.
+
+// Copyright (C) 2022 Ava Protocol
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! # Automation price pallet
+//!
+//! DISCLAIMER: This pallet is still in its early stages. At this point
+//! we only support scheduling two tasks per hour, and sending an on-chain
+//! event with a custom message.
+//!
+//! This pallet allows a user to schedule tasks. Tasks can be scheduled for any whole hour in the future.
+//! In order to run tasks, this pallet consumes up to a certain amount of weight during `on_initialize`.
+//!
+//!
+
+#![cfg_attr(not(feature = "std"), no_std)]
+pub use pallet::*;
+
+pub mod weights;
+
+pub mod types;
+pub use types::*;
+
+pub mod trigger;
+pub use trigger::*;
+
+mod fees;
+
+#[cfg(test)]
+mod mock;
+#[cfg(test)]
+mod tests;
+
+mod benchmarking;
+
+pub use fees::*;
+
+use core::convert::{TryFrom, TryInto};
+use cumulus_primitives_core::InteriorLocation;
+use parity_scale_codec::Decode;
+
+use cumulus_primitives_core::ParaId;
+use frame_support::{
+    pallet_prelude::{
+        DispatchError, DispatchResult, Encode, Get, IsType, MaxEncodedLen,
+        MaybeSerializeDeserialize, Member, NMapKey, Parameter, StorageDoubleMap, StorageMap,
+        StorageNMap, StorageValue, StorageVersion, Twox64Concat, ValueQuery,
+    },
+    traits::Currency,
+    transactional,
+    weights::constants::WEIGHT_REF_TIME_PER_SECOND,
+};
+use frame_system::pallet_prelude::*;
+use orml_traits::{FixedConversionRateProvider, MultiCurrency};
+use pallet_timestamp::{self as timestamp};
+use scale_info::{prelude::format, TypeInfo};
+use sp_runtime::{
+    traits::{CheckedConversion, Convert, SaturatedConversion, Saturating},
+    ArithmeticError, Perbill,
+};
+use sp_std::{boxed::Box, collections::btree_map::BTreeMap, ops::Bound::Included, vec, vec::Vec};
+
+use ava_protocol_primitives::EnsureProxy;
+pub use pallet_xcmp_handler::InstructionSequence;
+pub use weights::WeightInfo;
+
+use pallet_xcmp_handler::XcmpTransactor;
+use staging_xcm::{latest::prelude::*, VersionedLocation};
+
+#[frame_support::pallet]
+pub mod pallet {
+    use super::*;
+
+    pub type AccountOf<T> = <T as frame_system::Config>::AccountId;
+    pub type BalanceOf<T> =
+        <<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
+    pub type MultiBalanceOf<T> = <<T as Config>::MultiCurrency as MultiCurrency<
+        <T as frame_system::Config>::AccountId,
+    >>::Balance;
+    pub type ActionOf<T> = Action<AccountOf<T>, BalanceOf<T>>;
+
+    pub type MultiCurrencyId<T> = <<T as Config>::MultiCurrency as MultiCurrency<
+        <T as frame_system::Config>::AccountId,
+    >>::CurrencyId;
+
+    type UnixTime = u64;
+    pub type TaskId = Vec<u8>;
+    pub type TaskAddress<T> = (AccountOf<T>, TaskId);
+    pub type TaskIdList<T> = Vec<TaskAddress<T>>;
+
+    type ChainName = Vec<u8>;
+    type Exchange = Vec<u8>;
+
+    type AssetName = Vec<u8>;
+    type AssetPair = (AssetName, AssetName);
+    type AssetPrice = u128;
+    type TriggerFunction = Vec<u8>;
+
+    /// The struct that stores all information needed for a task.
+    #[derive(Debug, Eq, Encode, Decode, TypeInfo, Clone)]
+    #[scale_info(skip_type_params(T))]
+    pub struct Task<T: Config> {
+        // origin data from the account scheduling the tasks
+        pub owner_id: AccountOf<T>,
+
+        // generated data
+        pub task_id: TaskId,
+
+        // user input data
+        pub chain: ChainName,
+        pub exchange: Exchange,
+        pub asset_pair: AssetPair,
+        pub expired_at: u128,
+
+        // TODO: Maybe expose enum?
+        pub trigger_function: Vec<u8>,
+        pub trigger_params: Vec<u128>,
+        pub action: ActionOf<T>,
+    }
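+    // Illustrative value (a sketch with placeholder data) showing how the fields fit
+    // together; `owner` and `action` are hypothetical, and the task_id mirrors the
+    // "block-extrinsic-event" format produced by generate_task_id below:
+    //
+    //     Task::<T> {
+    //         owner_id: owner,
+    //         task_id: b"1-0-3".to_vec(),
+    //         chain: b"KUSAMA".to_vec(),
+    //         exchange: b"EXCHANGE1".to_vec(),
+    //         asset_pair: (b"TUR".to_vec(), b"USDC".to_vec()),
+    //         expired_at: 1_700_000_000,
+    //         trigger_function: b"gt".to_vec(),
+    //         trigger_params: vec![100],
+    //         action,
+    //     };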
+    /// Needed for assert_eq to compare Tasks in tests due to BoundedVec.
+    impl<T: Config> PartialEq for Task<T> {
+        fn eq(&self, other: &Self) -> bool {
+            // TODO: correct this
+            self.owner_id == other.owner_id
+                && self.task_id == other.task_id
+                && self.asset_pair == other.asset_pair
+                && self.trigger_function == other.trigger_function
+                && self.trigger_params == other.trigger_params
+        }
+    }
+
+    #[pallet::config]
+    pub trait Config: frame_system::Config + pallet_timestamp::Config {
+        type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
+
+        /// Weight information for the extrinsics in this module.
+        type WeightInfo: WeightInfo;
+
+        /// The maximum number of tasks that can be scheduled for a time slot.
+        #[pallet::constant]
+        type MaxTasksPerSlot: Get<u32>;
+
+        /// The maximum number of tasks that a single user can schedule.
+        #[pallet::constant]
+        type MaxTasksPerAccount: Get<u32>;
+
+        /// The maximum number of tasks across our entire system.
+        #[pallet::constant]
+        type MaxTasksOverall: Get<u32>;
+
+        /// The maximum weight per block.
+        #[pallet::constant]
+        type MaxBlockWeight: Get<u64>;
+
+        /// The maximum percentage of weight per block used for scheduled tasks.
+        #[pallet::constant]
+        type MaxWeightPercentage: Get<Perbill>;
+
+        #[pallet::constant]
+        type ExecutionWeightFee: Get<BalanceOf<Self>>;
+
+        /// The Currency type for interacting with balances
+        type Currency: Currency<Self::AccountId>;
+
+        /// The MultiCurrency type for interacting with balances
+        type MultiCurrency: MultiCurrency<Self::AccountId>;
+
+        /// The currencyIds that our chain supports.
+        type CurrencyId: Parameter
+            + Member
+            + Copy
+            + MaybeSerializeDeserialize
+            + Ord
+            + TypeInfo
+            + MaxEncodedLen
+            + From<MultiCurrencyId<Self>>
+            + Into<MultiCurrencyId<Self>>
+            + From<u32>;
+
+        /// Converts CurrencyId to Multiloc
+        type CurrencyIdConvert: Convert<Self::CurrencyId, Option<Location>>
+            + Convert<Location, Option<Self::CurrencyId>>;
+
+        /// Handler for fees
+        type FeeHandler: HandleFees<Self>;
+
+        //type Origin: From<<Self as frame_system::Config>::RuntimeOrigin>
+        //	+ Into<Result<CumulusOrigin, <Self as Config>::Origin>>;
+
+        /// Converts between comparable currencies
+        type FeeConversionRateProvider: FixedConversionRateProvider;
+
+        /// This chain's Universal Location.
+        type UniversalLocation: Get<InteriorLocation>;
+        /// The paraId of this chain.
+        type SelfParaId: Get<ParaId>;
+
+        /// Utility for sending XCM messages
+        type XcmpTransactor: XcmpTransactor<Self::AccountId, Self::CurrencyId>;
+
+        /// Ensure proxy
+        type EnsureProxy: ava_protocol_primitives::EnsureProxy<Self::AccountId>;
+    }
+
+    const STORAGE_VERSION: StorageVersion = StorageVersion::new(0);
+
+    #[pallet::pallet]
+    #[pallet::without_storage_info]
+    #[pallet::storage_version(STORAGE_VERSION)]
+    pub struct Pallet<T>(_);
+
+    // TODO: Cleanup before merge
+    #[derive(Debug, Encode, Decode, TypeInfo)]
+    #[scale_info(skip_type_params(T))]
+    pub struct RegistryInfo<T: Config> {
+        round: u128,
+        decimal: u8,
+        last_update: u64,
+        oracle_providers: Vec<AccountOf<T>>,
+    }
+
+    // TODO: Use a ring buffer to also store the last n history data points effectively
+    #[derive(Debug, Encode, Decode, TypeInfo)]
+    #[scale_info(skip_type_params(T))]
+    pub struct PriceData {
+        pub round: u128,
+        pub updated_at: u128,
+        pub value: u128,
+    }
+
+    // AssetRegistry holds information and metadata about the assets we support
+    #[pallet::storage]
+    #[pallet::getter(fn get_asset_registry_info)]
+    pub type AssetRegistry<T: Config> = StorageNMap<
+        _,
+        (
+            NMapKey<Twox64Concat, ChainName>,
+            NMapKey<Twox64Concat, Exchange>,
+            NMapKey<Twox64Concat, AssetPair>,
+        ),
+        RegistryInfo<T>,
+    >;
+
+    // PriceRegistry holds price-only information for the assets we support
+    #[pallet::storage]
+    #[pallet::getter(fn get_asset_price_data)]
+    pub type PriceRegistry<T: Config> = StorageNMap<
+        _,
+        (
+            NMapKey<Twox64Concat, ChainName>,
+            NMapKey<Twox64Concat, Exchange>,
+            NMapKey<Twox64Concat, AssetPair>,
+        ),
+        PriceData,
+    >;
+
+    // SortedTasksIndex is our task shard, sorted by price.
+    // Each task for a given asset is organized into a BTreeMap
+    // https://doc.rust-lang.org/std/collections/struct.BTreeMap.html#method.insert
+    // - key: Trigger Price
+    // - value: vector of task ids
+    // TODO: move these to a trigger model
+    // TODO: handle task expiration
+    #[pallet::storage]
+    #[pallet::getter(fn get_sorted_tasks_index)]
+    pub type SortedTasksIndex<T: Config> = StorageNMap<
+        _,
+        (
+            NMapKey<Twox64Concat, ChainName>,
+            NMapKey<Twox64Concat, Exchange>,
+            NMapKey<Twox64Concat, AssetPair>,
+            NMapKey<Twox64Concat, TriggerFunction>,
+        ),
+        BTreeMap<AssetPrice, TaskIdList<T>>,
+    >;
+
+    // SortedTasksByExpiration is our expiration-sorted task index
+    #[pallet::type_value]
+    pub fn DefaultSortedTasksByExpiration<T: Config>(
+    ) -> BTreeMap<u128, BTreeMap<TaskId, AccountOf<T>>> {
+        BTreeMap::<u128, BTreeMap<TaskId, AccountOf<T>>>::new()
+    }
+    #[pallet::storage]
+    #[pallet::getter(fn get_sorted_tasks_by_expiration)]
+    pub type SortedTasksByExpiration<T> = StorageValue<
+        Value = BTreeMap<u128, BTreeMap<TaskId, AccountOf<T>>>,
+        QueryKind = ValueQuery,
+        OnEmpty = DefaultSortedTasksByExpiration<T>,
+    >;
+
+    // All active tasks, organized by account.
+    // In this storage we are only interested in returning the tasks belonging to an account;
+    // we also want fast lookup when a task is inserted into or removed from the storage.
+    //
+    // We also want to remove expired tasks, so we leverage this storage for that as well.
+    #[pallet::storage]
+    #[pallet::getter(fn get_task)]
+    pub type Tasks<T: Config> =
+        StorageDoubleMap<_, Twox64Concat, AccountOf<T>, Twox64Concat, TaskId, Task<T>>;
+
+    // Track various task metrics on our chain, such as the total task count.
+    #[pallet::storage]
+    #[pallet::getter(fn get_task_stat)]
+    pub type TaskStats<T: Config> = StorageMap<_, Twox64Concat, StatType, u64>;
+
+    // Track various per-account task metrics.
+    // To count tasks per account, relying on the Tasks storage alone would mean iterating over
+    // all values that share the first key (owner_id) just to count them.
+    //
+    // Store the task count
+    #[pallet::storage]
+    #[pallet::getter(fn get_account_stat)]
+    pub type AccountStats<T: Config> =
+        StorageDoubleMap<_, Twox64Concat, AccountOf<T>, Twox64Concat, StatType, u64>;
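+    // Illustrative read pattern (sketch): both stat maps return `Option<u64>` and stay
+    // empty until the first task is scheduled, hence the `map_or(0, |v| v)` used
+    // throughout this pallet:
+    //
+    //     let total = Self::get_task_stat(StatType::TotalTasksOverall).map_or(0, |v| v);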
+    // TaskQueue stores the tasks to be executed. To run, a task needs to be moved into this
+    // queue; from there our task execution picks it up and runs it.
+    //
+    // When a task is run, we check the price once more, and if it falls out of range we move
+    // the task back to the Tasks registry.
+    //
+    // If the task is expired, we won't run it either.
+    #[pallet::storage]
+    #[pallet::getter(fn get_task_queue)]
+    pub type TaskQueue<T: Config> = StorageValue<_, TaskIdList<T>, ValueQuery>;
+
+    #[pallet::storage]
+    #[pallet::getter(fn is_shutdown)]
+    pub type Shutdown<T: Config> = StorageValue<_, bool, ValueQuery>;
+
+    #[pallet::error]
+    pub enum Error<T> {
+        InvalidTaskId,
+        /// Duplicate task
+        DuplicateTask,
+
+        /// Non existent asset
+        AssetNotSupported,
+        AssetNotInitialized,
+        /// Asset already supported
+        AssetAlreadySupported,
+        AssetAlreadyInitialized,
+        /// Asset cannot be updated by this account
+        InvalidAssetSudo,
+        OracleNotAuthorized,
+        /// Asset must be in triggerable range.
+        AssetNotInTriggerableRange,
+        AssetUpdatePayloadMalform,
+        /// Block Time not set
+        BlockTimeNotSet,
+        /// Invalid Expiration Window for new asset
+        InvalidAssetExpirationWindow,
+        /// Maximum tasks reached for the slot
+        MaxTasksReached,
+        /// Maximum tasks reached for a given account
+        MaxTasksPerAccountReached,
+        /// Failed to insert task
+        TaskInsertionFailure,
+        /// Failed to remove task
+        TaskRemoveFailure,
+        /// Task Not Found When canceling
+        TaskNotFound,
+        /// Error when setting task expired less than the current block time
+        InvalidTaskExpiredAt,
+        /// Error when failed to update task expiration storage
+        TaskExpiredStorageFailedToUpdate,
+        /// Insufficient Balance
+        InsufficientBalance,
+        /// Restrictions on Liquidity in Account
+        LiquidityRestrictions,
+        /// Too Many Assets Created
+        AssetLimitReached,
+
+        FeePaymentError,
+        CannotReanchor,
+        UnsupportedFeePayment,
+        /// The version of the `VersionedLocation` value used is not able
+        /// to be interpreted.
+        BadVersion,
+    }
+
+    /// This is an event helper struct to help us make sense of the chain state and surrounding
+    /// environment state when we emit an event during task execution or task scheduling.
+    ///
+    /// It should contain enough information for an operator to look at and reason about
+    /// "why did we get here".
+    /// Many fields on this struct are optional, to support multiple error conditions.
+    #[derive(Debug, Encode, Eq, PartialEq, Decode, TypeInfo, Clone)]
+    pub enum TaskCondition {
+        TargetPriceMatched {
+            // Record the state of the asset at the time the task is triggered,
+            // so when debugging we can use this to reason about why the task was triggered
+            chain: ChainName,
+            exchange: Exchange,
+            asset_pair: AssetPair,
+            price: u128,
+        },
+        AlreadyExpired {
+            // the original expired_at of this task
+            expired_at: u128,
+            // the block time when we emit this event. expired_at should always be <= now
+            now: u128,
+        },
+
+        PriceAlreadyMoved {
+            chain: ChainName,
+            exchange: Exchange,
+            asset_pair: AssetPair,
+            price: u128,
+
+            // The target price the task set
+            target_price: u128,
+        },
+    }
+
+    #[pallet::event]
+    #[pallet::generate_deposit(pub(super) fn deposit_event)]
+    pub enum Event<T: Config> {
+        /// Schedule task success.
+        TaskScheduled {
+            owner_id: AccountOf<T>,
+            task_id: TaskId,
+            schedule_as: Option<AccountOf<T>>,
+        },
+        // An event emitted when we're about to run the task
+        TaskTriggered {
+            owner_id: AccountOf<T>,
+            task_id: TaskId,
+            condition: TaskCondition,
+        },
+        // An event emitted when the task ran successfully
+        TaskExecuted {
+            owner_id: AccountOf<T>,
+            task_id: TaskId,
+        },
+        // An event emitted when the task was triggered and run, but resulted in an error
+        TaskExecutionFailed {
+            owner_id: AccountOf<T>,
+            task_id: TaskId,
+            error: DispatchError,
+        },
+        // An event emitted when the task is completed and removed from all of the queues
+        TaskCompleted {
+            owner_id: AccountOf<T>,
+            task_id: TaskId,
+        },
+        // An event emitted when the task is cancelled, either by the owner or by root
+        TaskCancelled {
+            owner_id: AccountOf<T>,
+            task_id: TaskId,
+        },
+        // An event emitted whenever we expect a task but cannot find it
+        TaskNotFound {
+            owner_id: AccountOf<T>,
+            task_id: TaskId,
+        },
+        // An event emitted when we are about to run a task, but the task expired right before
+        // it actually ran
+        TaskExpired {
+            owner_id: AccountOf<T>,
+            task_id: TaskId,
+            condition: TaskCondition,
+        },
+        // An event emitted when we proactively sweep an expired task
+        TaskSweep {
+            owner_id: AccountOf<T>,
+            task_id: TaskId,
+            condition: TaskCondition,
+        },
+        // An event for the extreme case where the chain is too busy, there are pending tasks
+        // from a previous block, and their respective prices have now moved outside of their
+        // matching target ranges
+        PriceAlreadyMoved {
+            owner_id: AccountOf<T>,
+            task_id: TaskId,
+            condition: TaskCondition,
+        },
+        AssetCreated {
+            chain: ChainName,
+            exchange: Exchange,
+            asset1: AssetName,
+            asset2: AssetName,
+            decimal: u8,
+        },
+        AssetUpdated {
+            owner_id: AccountOf<T>,
+            chain: ChainName,
+            exchange: Exchange,
+            asset1: AssetName,
+            asset2: AssetName,
+            price: u128,
+        },
+        AssetDeleted {
+            chain: ChainName,
+            exchange: Exchange,
+            asset1: AssetName,
+            asset2: AssetName,
+        },
+    }
+
+    // #[pallet::hooks]
+    // impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
+    //     fn on_initialize(_: T::BlockNumber) -> Weight {
+    //         if Self::is_shutdown() {
+    //             return T::DbWeight::get().reads(1u64)
+    //         }
+
+    //         let max_weight: Weight = Weight::from_parts(
+    //             T::MaxWeightPercentage::get().mul_floor(T::MaxBlockWeight::get()),
+    //             0,
+    //         );
+    //         Self::trigger_tasks(max_weight)
+    //     }
+
+    //     fn on_idle(_: T::BlockNumber, remaining_weight: Weight) -> Weight {
+    //         Self::sweep_expired_task(remaining_weight)
+    //     }
+    // }
+
+    #[pallet::call]
+    impl<T: Config> Pallet<T> {
+        /// Initialize an asset
+        ///
+        /// Add a new asset
+        ///
+        /// # Parameters
+        /// * `asset`: asset type
+        /// * `target_price`: baseline price of the asset
+        /// * `upper_bound`: TBD - highest executable percentage increase for asset
+        /// * `lower_bound`: TBD - highest executable percentage decrease for asset
+        /// * `asset_owner`: owner of the asset
+        /// * `expiration_period`: how frequently the tasks for an asset should expire
+        ///
+        /// # Errors
+        #[pallet::call_index(1)]
+        #[pallet::weight(<T as Config>::WeightInfo::initialize_asset_extrinsic(asset_owners.len() as u32))]
+        #[transactional]
+        pub fn initialize_asset(
+            origin: OriginFor<T>,
+            chain: Vec<u8>,
+            exchange: Vec<u8>,
+            asset1: AssetName,
+            asset2: AssetName,
+            decimal: u8,
+            asset_owners: Vec<AccountOf<T>>,
+        ) -> DispatchResult {
+            // TODO: use sudo and remove this feature flag
+            // TODO: needs fees if opened up to non-sudo
+            // When the dev-queue feature is enabled, we skip this check
+            #[cfg(not(feature = "dev-queue"))]
+            ensure_root(origin)?;
+
+            Self::create_new_asset(chain, exchange, asset1, asset2, decimal, asset_owners)?;
+
+            Ok(())
+        }
+
+        /// Update prices of multiple asset pairs at the same time
+        ///
+        /// Only an authorized origin can update the price. The authorized origin is set when
+        /// initializing an asset.
+        ///
+        /// An asset is identified by this tuple: (chain, exchange, (asset1, asset2)).
+        ///
+        /// To support updating multiple pairs, each element of the tuple becomes a separate
+        /// argument to this function, where each of these arguments is a vector.
+        ///
+        /// The elements at the same position across the vector arguments form the above tuple.
+        ///
+        /// # Parameters
+        /// * `chains`: a vector of chain names
+        /// * `exchanges`: a vector of exchange names
+        /// * `assets1`: a vector of asset1 names
+        /// * `assets2`: a vector of asset2 names
+        /// * `prices`: a vector of prices of asset1, expressed in asset2
+        /// * `submitted_at`: a vector of epochs. Each epoch is the time when the price was recognized by the oracle provider
+        /// * `rounds`: a number representing which round of the asset price we're updating. Unused internally
+        #[pallet::call_index(2)]
+        #[pallet::weight(<T as Config>::WeightInfo::asset_price_update_extrinsic(assets1.len() as u32))]
+        #[transactional]
+        pub fn update_asset_prices(
+            origin: OriginFor<T>,
+            chains: Vec<ChainName>,
+            exchanges: Vec<Exchange>,
+            assets1: Vec<AssetName>,
+            assets2: Vec<AssetName>,
+            prices: Vec<AssetPrice>,
+            submitted_at: Vec<u128>,
+            rounds: Vec<u128>,
+        ) -> DispatchResult {
+            let owner_id = ensure_signed(origin)?;
+
+            let current_block_time = Self::get_current_block_time();
+            if current_block_time.is_err() {
+                Err(Error::<T>::BlockTimeNotSet)?
+            }
+
+            let now = current_block_time.unwrap() as u128;
+
+            if !(chains.len() == exchanges.len()
+                && exchanges.len() == assets1.len()
+                && assets1.len() == assets2.len()
+                && assets2.len() == prices.len()
+                && prices.len() == submitted_at.len()
+                && submitted_at.len() == rounds.len())
+            {
+                Err(Error::<T>::AssetUpdatePayloadMalform)?
+            }
+
+            for (index, price) in prices.clone().iter().enumerate() {
+                let chain = chains[index].clone();
+                let exchange = exchanges[index].clone();
+                let asset1 = assets1[index].clone();
+                let asset2 = assets2[index].clone();
+                let round = rounds[index];
+
+                let key = (&chain, &exchange, (&asset1, &asset2));
+
+                if !AssetRegistry::<T>::contains_key(&key) {
+                    Err(Error::<T>::AssetNotInitialized)?
+                }
+
+                if let Some(asset_registry) = Self::get_asset_registry_info(key) {
+                    let allow_wallets: Vec<AccountOf<T>> = asset_registry.oracle_providers;
+                    if !allow_wallets.contains(&owner_id) {
+                        Err(Error::<T>::OracleNotAuthorized)?
+                    }
+
+                    // TODO: Eventually we will need to handle submitted_at and round properly
+                    // once we have more than one oracle.
+                    // Currently we skip those checks for the sake of interface simplicity.
+                    let this_round = match Self::get_asset_price_data(key) {
+                        Some(previous_price) => previous_price.round + 1,
+                        None => round,
+                    };
+
+                    PriceRegistry::<T>::insert(
+                        &key,
+                        PriceData {
+                            round: this_round,
+                            updated_at: now,
+                            value: *price,
+                        },
+                    );
+
+                    Self::deposit_event(Event::AssetUpdated {
+                        owner_id: owner_id.clone(),
+                        chain,
+                        exchange,
+                        asset1,
+                        asset2,
+                        price: *price,
+                    });
+                }
+            }
+            Ok(())
+        }
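+        // Illustrative call (a sketch with placeholder values): updating a single pair.
+        // All seven vectors must stay index-aligned, and `oracle` must be one of the
+        // authorized oracle_providers for the pair.
+        //
+        //     AutomationPrice::<T>::update_asset_prices(
+        //         RawOrigin::Signed(oracle).into(),
+        //         vec![b"KUSAMA".to_vec()],
+        //         vec![b"EXCHANGE1".to_vec()],
+        //         vec![b"TUR".to_vec()],
+        //         vec![b"USDC".to_vec()],
+        //         vec![1_000u128],
+        //         vec![now_epoch],
+        //         vec![1u128],
+        //     )?;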
+        /// Delete an asset. Deletion may not happen immediately if tasks were already
+        /// scheduled for this asset.
+        ///
+        /// # Parameters
+        /// * `asset`: asset type
+        /// * `directions`: number of directions of data input. (up, down, ?)
+        ///
+        /// # Errors
+        #[pallet::call_index(3)]
+        #[pallet::weight(<T as Config>::WeightInfo::initialize_asset_extrinsic(1))]
+        #[transactional]
+        pub fn delete_asset(
+            origin: OriginFor<T>,
+            chain: ChainName,
+            exchange: Exchange,
+            asset1: AssetName,
+            asset2: AssetName,
+        ) -> DispatchResult {
+            // TODO: use sudo and remove this feature flag
+            // When the dev-queue feature is enabled, we skip this root check so local
+            // development is easier
+            #[cfg(not(feature = "dev-queue"))]
+            ensure_root(origin)?;
+
+            let key = (&chain, &exchange, (&asset1, &asset2));
+            if let Some(_asset_info) = Self::get_asset_registry_info(key) {
+                AssetRegistry::<T>::remove(&key);
+                PriceRegistry::<T>::remove(&key);
+                Self::deposit_event(Event::AssetDeleted {
+                    chain,
+                    exchange,
+                    asset1,
+                    asset2,
+                });
+            } else {
+                Err(Error::<T>::AssetNotSupported)?
+            }
+            Ok(())
+        }
+
+        #[pallet::call_index(4)]
+        #[pallet::weight(<T as Config>::WeightInfo::schedule_xcmp_task_extrinsic())]
+        #[transactional]
+        pub fn schedule_xcmp_task(
+            origin: OriginFor<T>,
+            chain: ChainName,
+            exchange: Exchange,
+            asset1: AssetName,
+            asset2: AssetName,
+            expired_at: u128,
+            trigger_function: Vec<u8>,
+            trigger_param: Vec<u128>,
+            destination: Box<VersionedLocation>,
+            schedule_fee: Box<VersionedLocation>,
+            execution_fee: Box<AssetPayment>,
+            encoded_call: Vec<u8>,
+            encoded_call_weight: Weight,
+            overall_weight: Weight,
+        ) -> DispatchResult {
+            // Step 1:
+            //   Build the Task and put it into the task registry
+            // Step 2:
+            //   Put the task id into the index
+            // TODO: the value to be inserted into the BTree should come from a function that
+            //       extracts the value from the params
+            //
+            // TODO: HANDLE FEE to see whether the user can pay the fee
+            let owner_id = ensure_signed(origin)?;
+            let task_id = Self::generate_task_id();
+
+            let destination =
+                Location::try_from(*destination).map_err(|()| Error::<T>::BadVersion)?;
+            let schedule_fee =
+                Location::try_from(*schedule_fee).map_err(|()| Error::<T>::BadVersion)?;
+
+            let action = Action::XCMP {
+                destination,
+                schedule_fee,
+                execution_fee: *execution_fee,
+                encoded_call,
+                encoded_call_weight,
+                overall_weight,
+                schedule_as: None,
+                instruction_sequence: InstructionSequence::PayThroughSovereignAccount,
+            };
+
+            let task: Task<T> = Task::<T> {
+                owner_id,
+                task_id,
+                chain,
+                exchange,
+                asset_pair: (asset1, asset2),
+                expired_at,
+                trigger_function,
+                trigger_params: trigger_param,
+                action,
+            };
+
+            Self::validate_and_schedule_task(task)?;
+            Ok(())
+        }
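+        // Illustrative call (a sketch with placeholder values): schedule a task that fires
+        // an XCMP message to parachain 2001 once TUR/USDC trades above 100.
+        //
+        //     AutomationPrice::<T>::schedule_xcmp_task(
+        //         RawOrigin::Signed(owner).into(),
+        //         b"KUSAMA".to_vec(),
+        //         b"EXCHANGE1".to_vec(),
+        //         b"TUR".to_vec(),
+        //         b"USDC".to_vec(),
+        //         expired_at_epoch,
+        //         b"gt".to_vec(),
+        //         vec![100],
+        //         Box::new(Location::new(1, Parachain(2001)).into()),
+        //         Box::new(Location::default().into()),
+        //         Box::new(AssetPayment { asset_location: Location::new(1, Parachain(2001)).into(), amount: 0 }),
+        //         encoded_call,
+        //         Weight::from_parts(100_000, 0),
+        //         Weight::from_parts(200_000, 0),
+        //     )?;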
+        /// Schedule a task through XCMP, through a proxy account, to fire an XCMP message with a provided call.
+        ///
+        /// Before the task can be scheduled, the task must pass validation checks:
+        /// * The transaction is signed
+        /// * The asset pair is already initialized
+        ///
+        /// # Parameters
+        /// * `chain`: The chain name where we will send the task over
+        /// * `exchange`: the exchange name where we track the price
+        /// * `asset1`: The first asset name of the tracked pair
+        /// * `asset2`: The second asset name of the tracked pair
+        /// * `expired_at`: the epoch after which we will remove the task if it has not been executed yet
+        /// * `trigger_function`: currently only `gt` or `lt` are supported, meaning greater than or less than
+        /// * `trigger_params`: a list of parameters to feed into `trigger_function`; with `gt` and `lt` we only need to pass the target price as a single-element vector
+        /// * `schedule_fee`: The payment asset location required for scheduling the automation task
+        /// * `execution_fee`: The fee that will be paid for XCMP execution
+        /// * `encoded_call`: Call that will be sent via XCMP to the parachain id provided
+        /// * `encoded_call_weight`: Required weight at most the provided call will take
+        /// * `overall_weight`: The overall weight in which fees will be paid for XCM instructions
+        #[pallet::call_index(5)]
+        #[pallet::weight(<T as Config>::WeightInfo::schedule_xcmp_task_extrinsic().saturating_add(T::DbWeight::get().reads(1)))]
+        #[transactional]
+        pub fn schedule_xcmp_task_through_proxy(
+            origin: OriginFor<T>,
+            chain: ChainName,
+            exchange: Exchange,
+            asset1: AssetName,
+            asset2: AssetName,
+            expired_at: u128,
+            trigger_function: Vec<u8>,
+            trigger_params: Vec<u128>,
+
+            destination: Box<VersionedLocation>,
+            schedule_fee: Box<VersionedLocation>,
+            execution_fee: Box<AssetPayment>,
+            encoded_call: Vec<u8>,
+            encoded_call_weight: Weight,
+            overall_weight: Weight,
+            schedule_as: T::AccountId,
+        ) -> DispatchResult {
+            let owner_id = ensure_signed(origin)?;
+
+            // Make sure the owner is the proxy account of the user account.
+            T::EnsureProxy::ensure_ok(schedule_as.clone(), owner_id.clone())?;
+
+            let destination =
+                Location::try_from(*destination).map_err(|()| Error::<T>::BadVersion)?;
+            let schedule_fee =
+                Location::try_from(*schedule_fee).map_err(|()| Error::<T>::BadVersion)?;
+
+            let action = Action::XCMP {
+                destination,
+                schedule_fee,
+                execution_fee: *execution_fee,
+                encoded_call,
+                encoded_call_weight,
+                overall_weight,
+                schedule_as: Some(schedule_as),
+                instruction_sequence: InstructionSequence::PayThroughRemoteDerivativeAccount,
+            };
+
+            let task_id = Self::generate_task_id();
+            let task: Task<T> = Task::<T> {
+                owner_id,
+                task_id,
+                chain,
+                exchange,
+                asset_pair: (asset1, asset2),
+                expired_at,
+                trigger_function,
+                trigger_params,
+                action,
+            };
+
+            Self::validate_and_schedule_task(task)?;
+            Ok(())
+        }
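+        // Illustrative difference from `schedule_xcmp_task` (sketch): the proxy signs,
+        // the task is attributed to `schedule_as`, and the XCMP fee is paid through the
+        // remote derivative account instead of the sovereign account.
+        //
+        //     AutomationPrice::<T>::schedule_xcmp_task_through_proxy(
+        //         RawOrigin::Signed(proxy).into(),
+        //         /* the same arguments as schedule_xcmp_task, */
+        //         /* plus the delegator account: */ schedule_as,
+        //     )?;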
+        // When cancelling a task, we remove it from:
+        //   Task Registry
+        //   SortedTasksIndex
+        //   AccountTasks
+        //   Task Queue: if the task is already on the queue but hasn't run yet,
+        //   we will attempt to remove it
+        #[pallet::call_index(6)]
+        #[pallet::weight(<T as Config>::WeightInfo::cancel_task_extrinsic())]
+        #[transactional]
+        pub fn cancel_task(origin: OriginFor<T>, task_id: TaskId) -> DispatchResult {
+            let owner_id = ensure_signed(origin)?;
+
+            if let Some(task) = Self::get_task(&owner_id, &task_id) {
+                Self::remove_task(
+                    &task,
+                    Some(Event::TaskCancelled {
+                        owner_id: task.owner_id.clone(),
+                        task_id: task.task_id.clone(),
+                    }),
+                );
+            } else {
+                Err(Error::<T>::TaskNotFound)?
+            }
+
+            Ok(())
+        }
+    }
+
+    impl<T: Config> Pallet<T> {
+        pub fn generate_task_id() -> TaskId {
+            let current_block_number =
+                TryInto::<u64>::try_into(<frame_system::Pallet<T>>::block_number())
+                    .ok()
+                    .unwrap_or(0);
+
+            let tx_id = <frame_system::Pallet<T>>::extrinsic_index().unwrap_or(0);
+
+            let evt_index = <frame_system::Pallet<T>>::event_count();
+
+            format!("{:}-{:}-{:}", current_block_number, tx_id, evt_index)
+                .as_bytes()
+                .to_vec()
+        }
+
+        // Move tasks from the SortedTasksIndex into the TaskQueue once they are ready to be processed
+        pub fn shift_tasks(max_weight: Weight) -> Weight {
+            let weight_left: Weight = max_weight;
+
+            // TODO: Look into assets whose price moved instead
+            let task_to_process: &mut TaskIdList<T> = &mut Vec::new();
+
+            for key in SortedTasksIndex::<T>::iter_keys() {
+                let (chain, exchange, asset_pair, trigger_func) = key.clone();
+
+                // TODO: Swap asset to check pair
+                let current_price_wrap =
+                    Self::get_asset_price_data((&chain, &exchange, &asset_pair));
+
+                if current_price_wrap.is_none() {
+                    continue;
+                };
+                // Example: sell orders
+                //
+                // In the list we had tasks such as
+                // - task1: sell when price > 10
+                // - task2: sell when price > 20
+                // - task3: sell when price > 30
+                // If the price used to be 5, and is now 15, task1 gets run
+                // If the price used to be 5, and is now 25, task1 and task2 get run
+                // If the price used to be 5, and is now 35, all tasks get run
+                //
+                // Example: buy orders
+                //
+                // In the list we had tasks such as
+                // - task1: buy when price < 10
+                // - task2: buy when price < 20
+                // - task3: buy when price < 30
+                // If the price used to be 500, and is now 25, task3 gets run
+                // If the price used to be 500, and is now 15, task2 and task3 get run
+                // If the price used to be 500, and is now 5, all tasks get run
+                //
+                // TODO: handle atomic and transaction
+                if let Some(mut tasks) = Self::get_sorted_tasks_index(&key) {
+                    let current_price = current_price_wrap.unwrap();
+
+                    for (&price, task_ids) in
+                        (tasks.clone()).range(range_by_trigger_func(&trigger_func, &current_price))
+                    {
+                        // Remove because we map these into the task queue
+                        tasks.remove(&price);
+                        let t = &mut (&mut (task_ids.clone()));
+                        task_to_process.append(t);
+                    }
+
+                    // all tasks were moved for processing; delete the index entry
+                    if tasks.is_empty() {
+                        SortedTasksIndex::<T>::remove(&key);
+                    } else {
+                        SortedTasksIndex::<T>::insert(&key, tasks);
+                    }
+                }
+            }
+
+            if !task_to_process.is_empty() {
+                if TaskQueue::<T>::exists() {
+                    let mut old_task = TaskQueue::<T>::get();
+                    old_task.append(task_to_process);
+                    TaskQueue::<T>::put(old_task);
+                } else {
+                    TaskQueue::<T>::put(task_to_process);
+                };
+            }
+
+            weight_left
+        }
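+        // Illustrative sketch of the range matching above, assuming `range_by_trigger_func`
+        // (defined in trigger.rs, outside this diff) turns the trigger into a key range over
+        // the BTreeMap of target prices: for "gt" every target strictly below the current
+        // price has been crossed, and for "lt" every target strictly above it.
+        //
+        //     use sp_std::ops::Bound::{Excluded, Unbounded};
+        //     // "gt": all tasks whose target price is below the current price
+        //     for (&target, task_ids) in tasks.range((Unbounded, Excluded(current_price.value))) {
+        //         // move task_ids into the processing queue
+        //     }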
+        /// Trigger tasks for the block time.
+        ///
+        /// Complete as many tasks as possible given the maximum weight.
+        pub fn trigger_tasks(max_weight: Weight) -> Weight {
+            let mut weight_left: Weight = max_weight;
+            let check_time_and_deletion_weight = T::DbWeight::get().reads(2u64);
+            if weight_left.ref_time() < check_time_and_deletion_weight.ref_time() {
+                return weight_left;
+            }
+
+            Self::shift_tasks(weight_left);
+
+            // Now we can run those tasks
+            // TODO: We need to calculate enough weight and balance the tasks so we won't be
+            //       skewed by a particular kind of task asset
+            //
+            // Now we run as many tasks as possible.
+            // If the weight budget is exhausted, the remaining tasks will be picked up next time.
+            // If the price no longer matches, they will be put back into the TaskRegistry.
+            let task_queue = Self::get_task_queue();
+
+            weight_left = weight_left
+                // for the above read
+                .saturating_sub(T::DbWeight::get().reads(1u64))
+                // For measuring the TaskQueue::<T>::put(tasks_left);
+                .saturating_sub(T::DbWeight::get().writes(1u64));
+            if !task_queue.is_empty() {
+                let (tasks_left, new_weight_left) = Self::run_tasks(task_queue, weight_left);
+                weight_left = new_weight_left;
+                TaskQueue::<T>::put(tasks_left);
+            }
+
+            weight_left
+        }
+
+        pub fn create_new_asset(
+            chain: ChainName,
+            exchange: Exchange,
+            asset1: AssetName,
+            asset2: AssetName,
+            decimal: u8,
+            asset_owners: Vec<AccountOf<T>>,
+        ) -> Result<(), DispatchError> {
+            let key = (&chain, &exchange, (&asset1, &asset2));
+
+            if AssetRegistry::<T>::contains_key(&key) {
+                Err(Error::<T>::AssetAlreadyInitialized)?
+            }
+
+            let asset_info = RegistryInfo::<T> {
+                decimal,
+                round: 0,
+                last_update: 0,
+                oracle_providers: asset_owners,
+            };
+
+            AssetRegistry::<T>::insert(key, asset_info);
+
+            Self::deposit_event(Event::AssetCreated {
+                chain,
+                exchange,
+                asset1,
+                asset2,
+                decimal,
+            });
+            Ok(())
+        }
+
+        pub fn get_current_time_slot() -> Result<UnixTime, Error<T>> {
+            let now = <timestamp::Pallet<T>>::get().saturated_into::<UnixTime>();
+            if now == 0 {
+                Err(Error::<T>::BlockTimeNotSet)?
+            }
+            let now = now.saturating_div(1000);
+            let diff_to_min = now % 60;
+            Ok(now.saturating_sub(diff_to_min))
+        }
+
+        pub fn run_xcmp_task(
+            destination: Location,
+            caller: T::AccountId,
+            fee: AssetPayment,
+            encoded_call: Vec<u8>,
+            encoded_call_weight: Weight,
+            overall_weight: Weight,
+            flow: InstructionSequence,
+        ) -> (Weight, Option<DispatchError>) {
+            let fee_asset_location = Location::try_from(fee.asset_location);
+            if fee_asset_location.is_err() {
+                return (
+                    <T as Config>::WeightInfo::run_xcmp_task(),
+                    Some(Error::<T>::BadVersion.into()),
+                );
+            }
+            let fee_asset_location = fee_asset_location.unwrap();
+
+            match T::XcmpTransactor::transact_xcm(
+                destination,
+                fee_asset_location,
+                fee.amount,
+                caller,
+                encoded_call,
+                encoded_call_weight,
+                overall_weight,
+                flow,
+            ) {
+                Ok(()) => (<T as Config>::WeightInfo::run_xcmp_task(), None),
+                Err(e) => (<T as Config>::WeightInfo::run_xcmp_task(), Some(e)),
+            }
+        }
+
+        // return the epoch time of the current block
+        pub fn get_current_block_time() -> Result<UnixTime, DispatchError> {
+            let now = <timestamp::Pallet<T>>::get()
+                .checked_into::<UnixTime>()
+                .ok_or(ArithmeticError::Overflow)?;
+
+            if now == 0 {
+                Err(Error::<T>::BlockTimeNotSet)?;
+            }
+
+            let now = now.checked_div(1000).ok_or(ArithmeticError::Overflow)?;
+            Ok(now)
+        }
+        // Check whether a task can run or not based on its expiration and price.
+        //
+        // A task can be queued but expire by the time it's about to run; in that case we don't
+        // want it to be run.
+        //
+        // The price might also have moved by the time the task is invoked, in which case we
+        // don't want it to be run either.
+        fn task_can_run(task: &Task<T>) -> (Option<TaskCondition>, Weight) {
+            let mut consumed_weight: Weight = Weight::zero();
+
+            // If we cannot extract the time from the block, then something is horribly wrong;
+            // let's not move forward
+            let current_block_time = Self::get_current_block_time();
+            if current_block_time.is_err() {
+                return (None, consumed_weight);
+            }
+
+            let now = current_block_time.unwrap();
+
+            if task.expired_at < now.into() {
+                consumed_weight =
+                    consumed_weight.saturating_add(<T as Config>::WeightInfo::emit_event());
+
+                Self::deposit_event(Event::TaskExpired {
+                    owner_id: task.owner_id.clone(),
+                    task_id: task.task_id.clone(),
+                    condition: TaskCondition::AlreadyExpired {
+                        expired_at: task.expired_at,
+                        now: now.into(),
+                    },
+                });
+
+                return (None, consumed_weight);
+            }
+
+            // read storage once to get the price
+            consumed_weight = consumed_weight.saturating_add(T::DbWeight::get().reads(1u64));
+            if let Some(this_task_asset_price) =
+                Self::get_asset_price_data((&task.chain, &task.exchange, &task.asset_pair))
+            {
+                if task.is_price_condition_match(&this_task_asset_price) {
+                    return (
+                        Some(TaskCondition::TargetPriceMatched {
+                            chain: task.chain.clone(),
+                            exchange: task.exchange.clone(),
+                            asset_pair: task.asset_pair.clone(),
+                            price: this_task_asset_price.value,
+                        }),
+                        consumed_weight,
+                    );
+                } else {
+                    Self::deposit_event(Event::PriceAlreadyMoved {
+                        owner_id: task.owner_id.clone(),
+                        task_id: task.task_id.clone(),
+                        condition: TaskCondition::PriceAlreadyMoved {
+                            chain: task.chain.clone(),
+                            exchange: task.exchange.clone(),
+                            asset_pair: task.asset_pair.clone(),
+                            price: this_task_asset_price.value,
+
+                            target_price: task.trigger_params[0],
+                        },
+                    });
+
+                    return (None, consumed_weight);
+                }
+            }
+
+            // This happens when we cannot find the price, in which case the task cannot be run
+            (None, consumed_weight)
+        }
+
+        /// Runs as many tasks as the weight allows from the provided vec of task_ids.
+        ///
+        /// Returns a vec with the tasks that were not run and the remaining weight.
+        pub fn run_tasks(
+            mut task_ids: TaskIdList<T>,
+            mut weight_left: Weight,
+        ) -> (TaskIdList<T>, Weight) {
+            let mut consumed_task_index: usize = 0;
+
+            // If we cannot extract the time from the block, then something is horribly wrong;
+            // let's not move forward
+            let current_block_time = Self::get_current_block_time();
+            if current_block_time.is_err() {
+                return (task_ids, weight_left);
+            }
+
+            let _now = current_block_time.unwrap();
+
+            for (owner_id, task_id) in task_ids.iter() {
+                consumed_task_index.saturating_inc();
+
+                let action_weight = match Self::get_task(owner_id, task_id) {
+                    None => {
+                        Self::deposit_event(Event::TaskNotFound {
+                            owner_id: owner_id.clone(),
+                            task_id: task_id.clone(),
+                        });
+                        <T as Config>::WeightInfo::emit_event()
+                    }
+                    Some(task) => {
+                        let (task_condition, test_can_run_weight) = Self::task_can_run(&task);
+
+                        if task_condition.is_none() {
+                            test_can_run_weight
+                        } else {
+                            Self::deposit_event(Event::TaskTriggered {
+                                owner_id: task.owner_id.clone(),
+                                task_id: task.task_id.clone(),
+                                condition: task_condition.unwrap(),
+                            });
+
+                            let _total_task =
+                                Self::get_task_stat(StatType::TotalTasksOverall).map_or(0, |v| v);
+                            let _total_task_per_account = Self::get_account_stat(
+                                &task.owner_id,
+                                StatType::TotalTasksPerAccount,
+                            )
+                            .map_or(0, |v| v);
+
+                            let (task_action_weight, task_dispatch_error) =
+                                match task.action.clone() {
+                                    Action::XCMP {
+                                        destination,
+                                        execution_fee,
+                                        schedule_as,
+                                        encoded_call,
+                                        encoded_call_weight,
+                                        overall_weight,
+                                        instruction_sequence,
+                                        ..
+                                    } => Self::run_xcmp_task(
+                                        destination,
+                                        schedule_as.unwrap_or(task.owner_id.clone()),
+                                        execution_fee,
+                                        encoded_call,
+                                        encoded_call_weight,
+                                        overall_weight,
+                                        instruction_sequence,
+                                    ),
+                                };
+
+                            Self::remove_task(&task, None);
+
+                            if let Some(err) = task_dispatch_error {
+                                Self::deposit_event(Event::<T>::TaskExecutionFailed {
+                                    owner_id: task.owner_id.clone(),
+                                    task_id: task.task_id.clone(),
+                                    error: err,
+                                });
+                            } else {
+                                Self::deposit_event(Event::<T>::TaskExecuted {
+                                    owner_id: task.owner_id.clone(),
+                                    task_id: task.task_id.clone(),
+                                });
+                            }
+
+                            Self::deposit_event(Event::<T>::TaskCompleted {
+                                owner_id: task.owner_id.clone(),
+                                task_id: task.task_id.clone(),
+                            });
+
+                            task_action_weight
+                                .saturating_add(T::DbWeight::get().writes(1u64))
+                                .saturating_add(T::DbWeight::get().reads(1u64))
+                        }
+                    }
+                };
+
+                weight_left = weight_left.saturating_sub(action_weight);
+
+                let run_another_task_weight = <T as Config>::WeightInfo::emit_event()
+                    .saturating_add(T::DbWeight::get().writes(1u64))
+                    .saturating_add(T::DbWeight::get().reads(1u64));
+                if weight_left.ref_time() < run_another_task_weight.ref_time() {
+                    break;
+                }
+            }
+
+            if consumed_task_index == task_ids.len() {
+                (vec![], weight_left)
+            } else {
+                (task_ids.split_off(consumed_task_index), weight_left)
+            }
+        }
+
+        // Handle task removal. There are a few places a task needs to be removed from:
+        //   - Tasks storage
+        //   - TaskQueue, if the task is already queued
+        //   - TaskStats: decrease the task count
+        //   - AccountStats: decrease the task count
+        //   - SortedTasksIndex: tasks sorted by price
+        //   - SortedTasksByExpiration: tasks sorted by expiration epoch
+        pub fn remove_task(task: &Task<T>, event: Option<Event<T>>) {
+            Tasks::<T>::remove(task.owner_id.clone(), task.task_id.clone());
+
+            // Remove it from SortedTasksIndex
+            let key = (
+                &task.chain,
+                &task.exchange,
+                &task.asset_pair,
+                &task.trigger_function,
+            );
+            if let Some(mut sorted_tasks_by_price) = Self::get_sorted_tasks_index(key) {
+                if let Some(tasks) = sorted_tasks_by_price.get_mut(&task.trigger_params[0]) {
+                    if let Some(pos) = tasks.iter().position(|x| {
+                        let (_, task_id) = x;
+                        *task_id == task.task_id
+                    }) {
+                        tasks.remove(pos);
+                    }
+
+                    if tasks.is_empty() {
+                        // if there are no more tasks on this slot, clear it up
+                        sorted_tasks_by_price.remove(&task.trigger_params[0].clone());
+                    }
+                    SortedTasksIndex::<T>::insert(&key, sorted_tasks_by_price);
+                }
+            }
+
+            // Remove it from SortedTasksByExpiration
+            SortedTasksByExpiration::<T>::mutate(|sorted_tasks_by_expiration| {
+                if let Some(expired_task_slot) =
+                    sorted_tasks_by_expiration.get_mut(&task.expired_at)
+                {
+                    expired_task_slot.remove(&task.task_id);
+                    if expired_task_slot.is_empty() {
+                        sorted_tasks_by_expiration.remove(&task.expired_at);
+                    }
+                }
+            });
+
+            // Update metrics
+            let total_task = Self::get_task_stat(StatType::TotalTasksOverall).map_or(0, |v| v);
+            let total_task_per_account =
+                Self::get_account_stat(&task.owner_id, StatType::TotalTasksPerAccount)
+                    .map_or(0, |v| v);
+
+            if total_task >= 1 {
+                TaskStats::<T>::insert(StatType::TotalTasksOverall, total_task - 1);
+            }
+
+            if total_task_per_account >= 1 {
+                AccountStats::<T>::insert(
+                    task.owner_id.clone(),
+                    StatType::TotalTasksPerAccount,
+                    total_task_per_account - 1,
+                );
+            }
+
+            if let Some(e) = event {
+                Self::deposit_event(e);
+            }
+        }
+
+        // Sweep as many expired tasks as we can and return the remaining weight
+        pub fn sweep_expired_task(remaining_weight: Weight) -> Weight {
+            if remaining_weight.ref_time() <= T::DbWeight::get().reads(1u64).ref_time() {
+                // Weight too low, not enough to do anything useful
+                return remaining_weight;
+            }
+
+            let current_block_time = Self::get_current_block_time();
+
+            if current_block_time.is_err() {
+                // Cannot get the time; this probably is the first block
+                return remaining_weight;
+            }
+
+            let now = current_block_time.unwrap() as u128;
+
+            // At the end we will most likely need to write back the updated storage, so here we
+            // account for that write
+            let mut unused_weight = remaining_weight
+                .saturating_sub(T::DbWeight::get().reads(1u64))
+                .saturating_sub(T::DbWeight::get().writes(1u64));
+            let mut tasks_by_expiration = Self::get_sorted_tasks_by_expiration();
+
+            let mut expired_shards: Vec<u128> = vec![];
+            // Use Included(now) because if a task has not run by the end of this block, then
+            // by the next block it will certainly be expired
+            'outer: for (expired_time, task_ids) in
+                tasks_by_expiration.range_mut((Included(&0_u128), Included(&now)))
+            {
+                for (task_id, owner_id) in task_ids.iter() {
+                    if unused_weight.ref_time()
+                        > T::DbWeight::get()
+                            .reads(1u64)
+                            .saturating_add(<T as Config>::WeightInfo::remove_task())
+                            .ref_time()
+                    {
+                        unused_weight = unused_weight
+                            .saturating_sub(T::DbWeight::get().reads(1u64))
+                            .saturating_sub(<T as Config>::WeightInfo::remove_task());
+
+                        // Now let's remove the task from chain storage
+                        if let Some(task) = Self::get_task(owner_id, task_id) {
+                            Self::remove_task(
+                                &task,
+                                Some(Event::TaskSweep {
+                                    owner_id: task.owner_id.clone(),
+                                    task_id: task.task_id.clone(),
+                                    condition: TaskCondition::AlreadyExpired {
+                                        expired_at: task.expired_at,
+                                        now,
+                                    },
+                                }),
+                            );
+                        }
+                    } else {
+                        // If there is not enough weight left, break all the way out; we have
+                        // already saved one write's worth of weight to update storage
+                        break 'outer;
+                    }
+                }
+                expired_shards.push(*expired_time);
+            }
+
+            unused_weight
+        }
+
+        // Tasks are written into sorted storage, represented by a BTreeMap, so we can find
+        // and expire them
+        pub fn track_expired_task(task: &Task<T>) -> Result<bool, Error<T>> {
+            // First we get back the reference to the underlying storage and
+            // perform the relevant update to write the task to the right shard by expiration
+            // time; the value is then stored back
+            let mut tasks_by_expiration = Self::get_sorted_tasks_by_expiration();
+
+            if let Some(task_shard) = tasks_by_expiration.get_mut(&task.expired_at) {
+                task_shard.insert(task.task_id.clone(), task.owner_id.clone());
+            } else {
+                tasks_by_expiration.insert(
+                    task.expired_at,
+                    BTreeMap::from([(task.task_id.clone(), task.owner_id.clone())]),
+                );
+            }
+            SortedTasksByExpiration::<T>::put(tasks_by_expiration);
+
+            Ok(true)
+        }
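+        // Shape of the expiration index maintained above (illustrative):
+        //
+        //     expired_at (u128) -> BTreeMap<TaskId, AccountOf<T>>
+        //
+        // so sweep_expired_task can range over (0 ..= now) and drop whole shards at once.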
+        /// Running this within a transaction protects against partial success, where N of M
+        /// execution slots might be full, rolling back any successful insertions into the
+        /// scheduled task table.
+        /// Validate and schedule the task.
+        /// This will also charge the execution fee.
+        /// TODO: double-check atomicity
+        pub fn validate_and_schedule_task(task: Task<T>) -> Result<(), Error<T>> {
+            if task.task_id.is_empty() {
+                Err(Error::<T>::InvalidTaskId)?
+            }
+
+            let current_block_time = Self::get_current_block_time();
+            if current_block_time.is_err() {
+                // Cannot get the time; this probably is the first block
+                Err(Error::<T>::BlockTimeNotSet)?
+            }
+
+            let now = current_block_time.unwrap() as u128;
+
+            if task.expired_at <= now {
+                Err(Error::<T>::InvalidTaskExpiredAt)?
+            }
+
+            let total_task = Self::get_task_stat(StatType::TotalTasksOverall).map_or(0, |v| v);
+            let total_task_per_account =
+                Self::get_account_stat(&task.owner_id, StatType::TotalTasksPerAccount)
+                    .map_or(0, |v| v);
+
+            // check the overall task limit
+            if total_task >= T::MaxTasksOverall::get().into() {
+                Err(Error::<T>::MaxTasksReached)?
+            }
+
+            // check the per-account task limit
+            if total_task_per_account >= T::MaxTasksPerAccount::get().into() {
+                Err(Error::<T>::MaxTasksPerAccountReached)?
+            }
+
+            match task.action.clone() {
+                Action::XCMP {
+                    execution_fee,
+                    instruction_sequence,
+                    ..
+                } => {
+                    let asset_location = Location::try_from(execution_fee.asset_location)
+                        .map_err(|()| Error::<T>::BadVersion)?;
+                    let asset_location = asset_location
+                        .reanchored(
+                            &Location::new(1, Parachain(T::SelfParaId::get().into())),
+                            &T::UniversalLocation::get(),
+                        )
+                        .map_err(|_| Error::<T>::CannotReanchor)?;
+                    // Only the native token is supported as the XCMP fee for local deductions
+                    if instruction_sequence == InstructionSequence::PayThroughSovereignAccount
+                        && asset_location != Location::new(0, Here)
+                    {
+                        Err(Error::<T>::UnsupportedFeePayment)?
+                    }
+                }
+            };
+
+            let fee_result = T::FeeHandler::pay_checked_fees_for(
+                &(task.owner_id.clone()),
+                &(task.action.clone()),
+                || {
+                    Tasks::<T>::insert(task.owner_id.clone(), task.task_id.clone(), &task);
+
+                    // Post task processing; increase the relevant metrics data
+                    TaskStats::<T>::insert(StatType::TotalTasksOverall, total_task + 1);
+                    AccountStats::<T>::insert(
+                        task.owner_id.clone(),
+                        StatType::TotalTasksPerAccount,
+                        total_task_per_account + 1,
+                    );
+
+                    let key = (
+                        &task.chain,
+                        &task.exchange,
+                        &task.asset_pair,
+                        &task.trigger_function,
+                    );
+
+                    if let Some(mut sorted_task_index) = Self::get_sorted_tasks_index(key) {
+                        // TODO: remove hard-coding and take the right param
+                        if let Some(tasks_by_price) =
+                            sorted_task_index.get_mut(&(task.trigger_params[0]))
+                        {
+                            tasks_by_price.push((task.owner_id.clone(), task.task_id.clone()));
+                        } else {
+                            sorted_task_index.insert(
+                                task.trigger_params[0],
+                                vec![(task.owner_id.clone(), task.task_id.clone())],
+                            );
+                        }
+                        SortedTasksIndex::<T>::insert(key, sorted_task_index);
+                    } else {
+                        let mut sorted_task_index = BTreeMap::<AssetPrice, TaskIdList<T>>::new();
+                        sorted_task_index.insert(
+                            task.trigger_params[0],
+                            vec![(task.owner_id.clone(), task.task_id.clone())],
+                        );
+
+                        // TODO: sort based on the trigger_function comparison of the parameter,
+                        //       then at trigger time we cut off all the left part of the tree
+                        SortedTasksIndex::<T>::insert(key, sorted_task_index);
+                    };
+
+                    Ok(())
+                },
+            );
+
+            if fee_result.is_err() {
+                Err(Error::<T>::FeePaymentError)?
+            }
+
+            if Self::track_expired_task(&task).is_err() {
+                Err(Error::<T>::TaskExpiredStorageFailedToUpdate)?
+            }
+
+            let schedule_as = match task.action.clone() {
+                Action::XCMP { schedule_as, .. } => schedule_as,
+            };
+
+            Self::deposit_event(Event::TaskScheduled {
+                owner_id: task.owner_id,
+                task_id: task.task_id,
+                schedule_as,
+            });
+            Ok(())
+        }
+
+        /// Calculates the execution fee for a given action based on weight and number of executions.
+        ///
+        /// The fee saturates at Weight/BalanceOf when there is an unreasonable number of executions.
+        /// In practice, executions is bounded by T::MaxExecutionTimes and unlikely to saturate.
+        pub fn calculate_schedule_fee_amount(
+            action: &ActionOf<T>,
+        ) -> Result<BalanceOf<T>, DispatchError> {
+            let total_weight = action.execution_weight::<T>()?;
+
+            let schedule_fee_location = action.schedule_fee_location::<T>();
+            let schedule_fee_location = schedule_fee_location
+                .reanchored(
+                    &Location::new(1, Parachain(T::SelfParaId::get().into())),
+                    &T::UniversalLocation::get(),
+                )
+                .map_err(|_| Error::<T>::CannotReanchor)?;
+
+            let fee = if schedule_fee_location == Location::default() {
+                T::ExecutionWeightFee::get()
+                    .saturating_mul(<BalanceOf<T>>::saturated_from(total_weight))
+            } else {
+                let raw_fee =
+                    T::FeeConversionRateProvider::get_fee_per_second(&schedule_fee_location)
+                        .ok_or("CouldNotDetermineFeePerSecond")?
+                        .checked_mul(total_weight as u128)
+                        .ok_or("FeeOverflow")
+                        .map(|raw_fee| raw_fee / (WEIGHT_REF_TIME_PER_SECOND as u128))?;
+                <BalanceOf<T>>::saturated_from(raw_fee)
+            };
+
+            Ok(fee)
+        }
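+        // Worked example for the non-native branch above (illustrative numbers): with
+        // fee_per_second = 1_000_000_000 and total_weight = 500_000_000_000 ref-time
+        // units (half a second, since WEIGHT_REF_TIME_PER_SECOND = 10^12),
+        //
+        //     raw_fee = 1_000_000_000 * 500_000_000_000 / 10^12 = 500_000_000
+        //
+        // i.e. half of the per-second fee.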
+    }
+}
diff --git a/pallets/automation-price/src/mock.rs b/pallets/automation-price/src/mock.rs
new file mode 100644
index 000000000..9ad954b39
--- /dev/null
+++ b/pallets/automation-price/src/mock.rs
@@ -0,0 +1,563 @@
+// This file is part of Ava Protocol.
+
+// Copyright (C) 2022 Ava Protocol
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use super::*;
+use crate as pallet_automation_price;
+use crate::TaskId;
+
+use ava_protocol_primitives::EnsureProxy;
+use frame_support::{
+    assert_ok, construct_runtime, parameter_types,
+    traits::{ConstU32, Everything},
+    weights::Weight,
+    PalletId,
+};
+use frame_system::{self as system, RawOrigin};
+use orml_traits::parameter_type_with_key;
+use sp_core::H256;
+use sp_runtime::{
+    traits::{AccountIdConversion, BlakeTwo256, Convert, IdentityLookup},
+    AccountId32, BuildStorage, Perbill,
+};
+use sp_std::{marker::PhantomData, vec::Vec};
+use staging_xcm::latest::{prelude::*, Junctions::*};
+
+type Block = system::mocking::MockBlock<Test>;
+
+use crate::weights::WeightInfo;
+
+pub type Balance = u128;
+pub type AccountId = AccountId32;
+pub type CurrencyId = u32;
+
+pub const ALICE: [u8; 32] = [1u8; 32];
+pub const BOB: [u8; 32] = [2u8; 32];
+pub const DELEGATOR_ACCOUNT: [u8; 32] = [3u8; 32];
+pub const PROXY_ACCOUNT: [u8; 32] = [4u8; 32];
+
+pub const PARA_ID: u32 = 2000;
+pub const NATIVE: CurrencyId = 0;
+pub const NATIVE_LOCATION: Location = Location {
+    parents: 0,
+    interior: Here,
+};
+pub const NATIVE_EXECUTION_WEIGHT_FEE: u128 = 12;
+pub const FOREIGN_CURRENCY_ID: CurrencyId = 1;
+
+pub fn get_moonbase_asset_location() -> Location {
+    Location {
+        parents: 1,
+        interior: X2([Parachain(1000u32), PalletInstance(3u8)].into()),
+    }
+}
+
+pub const EXCHANGE1: &[u8] = "EXCHANGE1".as_bytes();
+
+pub const CHAIN1: &[u8] = "KUSAMA".as_bytes();
+pub const CHAIN2: &[u8] = "DOT".as_bytes();
+
+pub const ASSET1: &[u8] = "TUR".as_bytes();
+pub const ASSET2: &[u8] = "USDC".as_bytes();
+pub const ASSET3: &[u8] = "KSM".as_bytes();
+pub const MOCK_XCMP_FEE: u128 = 10_000_000_u128;
+
+construct_runtime!(
+    pub enum Test
+    {
+        System: system,
+        Timestamp: pallet_timestamp,
+        Balances: pallet_balances,
+        ParachainInfo: parachain_info,
+        Tokens: orml_tokens,
+        Currencies: orml_currencies,
+        AutomationPrice: pallet_automation_price,
+    }
+);
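+// Illustrative test usage of this mock runtime (a sketch; `new_test_ext` and
+// `START_BLOCK_TIME` are referenced from tests.rs and benchmarking.rs):
+//
+//     new_test_ext(START_BLOCK_TIME).execute_with(|| {
+//         assert_ok!(AutomationPrice::initialize_asset(
+//             RawOrigin::Root.into(),
+//             CHAIN1.to_vec(), EXCHANGE1.to_vec(),
+//             ASSET1.to_vec(), ASSET2.to_vec(), 10, vec![AccountId32::new(ALICE)],
+//         ));
+//     });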
{
+    pub const BlockHashCount: u64 = 250;
+    pub const SS58Prefix: u8 = 51;
+}
+
+impl system::Config for Test {
+    type BaseCallFilter = Everything;
+    type BlockWeights = ();
+    type BlockLength = ();
+    type DbWeight = ();
+    type RuntimeOrigin = RuntimeOrigin;
+    type RuntimeCall = RuntimeCall;
+    type Nonce = u64;
+    type Block = Block;
+    type Hash = H256;
+    type Hashing = BlakeTwo256;
+    type AccountId = AccountId32;
+    type Lookup = IdentityLookup<Self::AccountId>;
+    type RuntimeEvent = RuntimeEvent;
+    type BlockHashCount = BlockHashCount;
+    type Version = ();
+    type PalletInfo = PalletInfo;
+    type AccountData = pallet_balances::AccountData<Balance>;
+    type OnNewAccount = ();
+    type OnKilledAccount = ();
+    type SystemWeightInfo = ();
+    type SS58Prefix = SS58Prefix;
+    type OnSetCode = ();
+    type MaxConsumers = frame_support::traits::ConstU32<16>;
+    type RuntimeTask = ();
+    type SingleBlockMigrations = ();
+    type MultiBlockMigrator = ();
+    type PreInherents = ();
+    type PostInherents = ();
+    type PostTransactions = ();
+}
+
+parameter_types! {
+    pub const ExistentialDeposit: u64 = 1;
+    pub const MaxLocks: u32 = 50;
+    pub const MaxReserves: u32 = 50;
+}
+impl pallet_balances::Config for Test {
+    type MaxLocks = MaxLocks;
+    type Balance = Balance;
+    type RuntimeEvent = RuntimeEvent;
+    type DustRemoval = ();
+    type ExistentialDeposit = ExistentialDeposit;
+    type AccountStore = System;
+    type MaxReserves = MaxReserves;
+    type ReserveIdentifier = [u8; 8];
+    type FreezeIdentifier = ();
+    type MaxFreezes = ConstU32<0>;
+    type RuntimeHoldReason = ();
+    type RuntimeFreezeReason = ();
+    type WeightInfo = ();
+}
+
+impl parachain_info::Config for Test {}
+
+parameter_type_with_key! {
+    pub ExistentialDeposits: |_currency_id: CurrencyId| -> Balance {
+        Default::default()
+    };
+}
+parameter_types! {
+    pub DustAccount: AccountId = PalletId(*b"auto/dst").into_account_truncating();
+}
+
+impl orml_tokens::Config for Test {
+    type RuntimeEvent = RuntimeEvent;
+    type Balance = Balance;
+    type Amount = i64;
+    type CurrencyId = CurrencyId;
+    type WeightInfo = ();
+    type ExistentialDeposits = ExistentialDeposits;
+    type CurrencyHooks = ();
+    type MaxLocks = ConstU32<100_000>;
+    type MaxReserves = ConstU32<100_000>;
+    type ReserveIdentifier = [u8; 8];
+    type DustRemovalWhitelist = frame_support::traits::Nothing;
+}
+
+impl orml_currencies::Config for Test {
+    type MultiCurrency = Tokens;
+    type NativeCurrency = AdaptedBasicCurrency;
+    type GetNativeCurrencyId = GetNativeCurrencyId;
+    type WeightInfo = ();
+}
+pub type AdaptedBasicCurrency = orml_currencies::BasicCurrencyAdapter<Test, Balances, i64, u64>;
+
+parameter_types! {
+    pub const MinimumPeriod: u64 = 1000;
+}
+
+impl pallet_timestamp::Config for Test {
+    type Moment = u64;
+    type OnTimestampSet = ();
+    type MinimumPeriod = MinimumPeriod;
+    type WeightInfo = ();
+}
+
+impl pallet_automation_price::Config for Test {
+    type RuntimeEvent = RuntimeEvent;
+    type MaxTasksPerSlot = MaxTasksPerSlot;
+    type MaxTasksPerAccount = MaxTasksPerAccount;
+    type MaxTasksOverall = MaxTasksOverall;
+    type MaxBlockWeight = MaxBlockWeight;
+    type MaxWeightPercentage = MaxWeightPercentage;
+    type WeightInfo = MockWeight<Test>;
+    type ExecutionWeightFee = ExecutionWeightFee;
+    type CurrencyId = CurrencyId;
+    type MultiCurrency = Currencies;
+    type Currency = Balances;
+    type CurrencyIdConvert = MockTokenIdConvert;
+    type FeeHandler = FeeHandler<Test, ()>;
+    type FeeConversionRateProvider = MockConversionRateProvider;
+    type UniversalLocation = UniversalLocation;
+    type SelfParaId = parachain_info::Pallet<Test>;
+    type XcmpTransactor = MockXcmpTransactor<Test, Balances>;
+
+    type EnsureProxy = MockEnsureProxy;
+}
+
+parameter_types! {
+    pub const MaxTasksPerSlot: u32 = 2;
+    // Mock value, purposely set to a small number so it is easier to test the limit being reached
+    pub const MaxTasksOverall: u32 = 1024;
+    pub const MaxTasksPerAccount: u32 = 16;
+    #[derive(Debug)]
+    pub const MaxScheduleSeconds: u64 = 24 * 60 * 60;
+    pub const MaxBlockWeight: u64 = 20_000_000;
+    pub const MaxWeightPercentage: Perbill = Perbill::from_percent(40);
+    pub const ExecutionWeightFee: Balance = NATIVE_EXECUTION_WEIGHT_FEE;
+
+    // When unit testing dynamic dispatch, we use the real weight value of the extrinsic call.
+    // This is an external lib that we don't own, so we try not to mock it, following the rule
+    // "don't mock what you don't own".
+    // One of the calls we test is Balances::transfer, whose weight is defined here:
+    // https://github.com/paritytech/polkadot-sdk/blob/polkadot-v0.9.38/frame/balances/src/weights.rs#L61-L73
+    // When logging the final calculated amount, its value is 73_314_000.
+    //
+    // In our unit tests, we run a few transfers with dynamic dispatch. On top
+    // of that, there is also the weight of our own calls, such as fetching the tasks and
+    // moving tasks from the schedule slot to the task queue, so the weight of a scheduled
+    // transfer with dynamic dispatch is even higher.
+    //
+    // Because we run a few of these in the tests, this is set to roughly 10x the 73_314_000 value.
+    pub const MaxWeightPerSlot: u128 = 700_000_000;
+    pub const XmpFee: u128 = 1_000_000;
+    pub const GetNativeCurrencyId: CurrencyId = NATIVE;
+}
+
+pub struct MockWeight<T>(PhantomData<T>);
+impl<T> pallet_automation_price::WeightInfo for MockWeight<T> {
+    fn emit_event() -> Weight {
+        Weight::from_parts(20_000_000_u64, 0u64)
+    }
+
+    fn asset_price_update_extrinsic(v: u32) -> Weight {
+        Weight::from_parts(220_000_000_u64 * v as u64, 0u64)
+    }
+
+    fn initialize_asset_extrinsic(_v: u32) -> Weight {
+        Weight::from_parts(220_000_000_u64, 0u64)
+    }
+
+    fn schedule_xcmp_task_extrinsic() -> Weight {
+        Weight::from_parts(24_000_000_u64, 0u64)
+    }
+
+    fn cancel_task_extrinsic() -> Weight {
+        Weight::from_parts(20_000_000_u64, 0u64)
+    }
+
+    fn run_xcmp_task() -> Weight {
+        Weight::from_parts(200_000_000_u64, 0u64)
+    }
+
+    fn remove_task() -> Weight {
+        Weight::from_parts(20_000_000_u64, 0u64)
+    }
+}
+
+pub struct MockXcmpTransactor<T, C>(PhantomData<(T, C)>);
+impl<T, C> pallet_xcmp_handler::XcmpTransactor<T::AccountId, CurrencyId>
+    for MockXcmpTransactor<T, C>
+where
+    T: Config + pallet::Config,
+    C: frame_support::traits::ReservableCurrency<T::AccountId>,
+{
+    fn transact_xcm(
+        _destination: Location,
+        _location: staging_xcm::latest::Location,
+        _fee: u128,
+        _caller: T::AccountId,
+        _transact_encoded_call: sp_std::vec::Vec<u8>,
+        _transact_encoded_call_weight: Weight,
+        _overall_weight: Weight,
+        _flow: InstructionSequence,
+    ) -> Result<(), sp_runtime::DispatchError> {
+        Ok(())
+    }
+
+    fn pay_xcm_fee(
+        _: CurrencyId,
+        _: T::AccountId,
+        _: u128,
+    ) -> Result<(), sp_runtime::DispatchError> {
+        Ok(())
+    }
+}
+
+pub struct MockConversionRateProvider;
+impl FixedConversionRateProvider for MockConversionRateProvider {
+    fn get_fee_per_second(location: &Location) -> Option<u128> {
+        get_fee_per_second(location)
+    }
+}
+
+pub struct MockTokenIdConvert;
+impl Convert<CurrencyId, Option<Location>> for MockTokenIdConvert {
+    fn convert(id: CurrencyId) -> Option<Location> {
+        if id == NATIVE {
+            Some(Location::new(0, Here))
+        } else if id == FOREIGN_CURRENCY_ID {
+            Some(Location::new(1, Parachain(PARA_ID)))
+        } else {
+            None
+        }
+    }
+}
+
+impl Convert<Location, Option<CurrencyId>> for MockTokenIdConvert {
+    fn convert(location: Location) -> Option<CurrencyId> {
+        if location == Location::new(0, Here) {
+            Some(NATIVE)
+        } else if location == Location::new(1, Parachain(PARA_ID)) {
+            Some(FOREIGN_CURRENCY_ID)
+        } else {
+            None
+        }
+    }
+}
+
+// TODO: We should extract this and share code with automation-time
+pub struct MockEnsureProxy;
+impl EnsureProxy<AccountId> for MockEnsureProxy {
+    fn ensure_ok(_delegator: AccountId, _delegatee: AccountId) -> Result<(), &'static str> {
+        if _delegator == DELEGATOR_ACCOUNT.into() && _delegatee == PROXY_ACCOUNT.into() {
+            Ok(())
+        } else {
+            Err("proxy error: expected `ProxyType::Any`")
+        }
+    }
+}
+
+parameter_types! {
+    pub const RelayNetwork: NetworkId = NetworkId::Rococo;
+    // The universal location within the global consensus system
+    pub UniversalLocation: InteriorLocation = X2([GlobalConsensus(RelayNetwork::get()), Parachain(ParachainInfo::parachain_id().into())].into());
+}
+
+// Build genesis storage according to the mock runtime.
+pub fn new_test_ext(state_block_time: u64) -> sp_io::TestExternalities { + let genesis_storage = system::GenesisConfig::::default() + .build_storage() + .unwrap(); + let mut ext = sp_io::TestExternalities::new(genesis_storage); + ext.execute_with(|| System::set_block_number(1)); + ext.execute_with(|| Timestamp::set_timestamp(state_block_time)); + ext +} + +pub fn events() -> Vec { + let events = System::events(); + let evt = events.into_iter().map(|evt| evt.event).collect::>(); + + System::reset_events(); + + evt +} + +// A utility test function to pluck out the task id from events, useful when dealing with multiple +// task scheduling +pub fn get_task_ids_from_events() -> Vec { + System::events() + .into_iter() + .filter_map(|e| match e.event { + RuntimeEvent::AutomationPrice(crate::Event::TaskScheduled { task_id, .. }) => { + Some(task_id) + } + _ => None, + }) + .collect::>() +} + +pub fn get_xcmp_funds(account: AccountId) { + let double_action_weight = MockWeight::::run_xcmp_task() * 2; + let action_fee = ExecutionWeightFee::get() * u128::from(double_action_weight.ref_time()); + let max_execution_fee = action_fee * u128::from(1u32); + let with_xcm_fees = max_execution_fee + XmpFee::get(); + Balances::force_set_balance(RawOrigin::Root.into(), account, with_xcm_fees).unwrap(); +} + +#[derive(Clone)] +pub struct MockAssetFeePerSecond { + pub asset_location: Location, + pub fee_per_second: u128, +} + +pub fn get_asset_fee_per_second_config() -> Vec { + let asset_fee_per_second: [MockAssetFeePerSecond; 3] = [ + MockAssetFeePerSecond { + asset_location: Location { + parents: 1, + interior: Parachain(2000).into(), + }, + fee_per_second: 416_000_000_000, + }, + MockAssetFeePerSecond { + asset_location: Location { + parents: 1, + interior: X2([ + Parachain(2110), + GeneralKey { + length: 4, + data: [0; 32], + }, + ] + .into()), + }, + fee_per_second: 416_000_000_000, + }, + MockAssetFeePerSecond { + asset_location: get_moonbase_asset_location(), + fee_per_second: 10_000_000_000_000_000_000, + }, + ]; + asset_fee_per_second.to_vec() +} + +pub fn get_fee_per_second(location: &Location) -> Option { + let location = location + .clone() + .reanchored( + &Location::new(1, Parachain(::SelfParaId::get().into())), + &::UniversalLocation::get(), + ) + .expect("Reanchor location failed"); + + let found_asset = get_asset_fee_per_second_config().into_iter().find(|item| { + let MockAssetFeePerSecond { asset_location, .. 
} = item;
+        *asset_location == location
+    });
+
+    if let Some(asset) = found_asset {
+        Some(asset.fee_per_second)
+    } else {
+        None
+    }
+}
+
+// Set up a sample default asset to support the tests
+pub fn setup_asset(sender: &AccountId32, chain: Vec<u8>) {
+    let _ = AutomationPrice::initialize_asset(
+        RawOrigin::Root.into(),
+        chain,
+        EXCHANGE1.to_vec(),
+        ASSET1.to_vec(),
+        ASSET2.to_vec(),
+        10,
+        vec![sender.clone()],
+    );
+}
+
+// Set up a few sample assets, initialize them with sane default values, and set a price to support the test cases
+pub fn setup_assets_and_prices(sender: &AccountId32, block_time: u128) {
+    let _ = AutomationPrice::initialize_asset(
+        RawOrigin::Root.into(),
+        CHAIN1.to_vec(),
+        EXCHANGE1.to_vec(),
+        ASSET1.to_vec(),
+        ASSET2.to_vec(),
+        10,
+        vec![sender.clone()],
+    );
+
+    let _ = AutomationPrice::initialize_asset(
+        RawOrigin::Root.into(),
+        CHAIN2.to_vec(),
+        EXCHANGE1.to_vec(),
+        ASSET2.to_vec(),
+        ASSET3.to_vec(),
+        10,
+        vec![sender.clone()],
+    );
+
+    let _ = AutomationPrice::initialize_asset(
+        RawOrigin::Root.into(),
+        CHAIN2.to_vec(),
+        EXCHANGE1.to_vec(),
+        ASSET1.to_vec(),
+        ASSET3.to_vec(),
+        10,
+        vec![sender.clone()],
+    );
+
+    // This fixture function initializes 3 asset pairs and sets their prices to 1000, 5000 and 10_000
+    const PAIR1_PRICE: u128 = 1000_u128;
+    const PAIR2_PRICE: u128 = 5000_u128;
+    const PAIR3_PRICE: u128 = 10_000_u128;
+    assert_ok!(AutomationPrice::update_asset_prices(
+        RuntimeOrigin::signed(sender.clone()),
+        vec![CHAIN1.to_vec()],
+        vec![EXCHANGE1.to_vec()],
+        vec![ASSET1.to_vec()],
+        vec![ASSET2.to_vec()],
+        vec![PAIR1_PRICE],
+        vec![block_time],
+        vec![1000],
+    ));
+
+    assert_ok!(AutomationPrice::update_asset_prices(
+        RuntimeOrigin::signed(sender.clone()),
+        vec![CHAIN2.to_vec()],
+        vec![EXCHANGE1.to_vec()],
+        vec![ASSET2.to_vec()],
+        vec![ASSET3.to_vec()],
+        vec![PAIR2_PRICE],
+        vec![block_time],
+        vec![1000],
+    ));
+
+    assert_ok!(AutomationPrice::update_asset_prices(
+        RuntimeOrigin::signed(sender.clone()),
+        vec![CHAIN2.to_vec()],
+        vec![EXCHANGE1.to_vec()],
+        vec![ASSET1.to_vec()],
+        vec![ASSET3.to_vec()],
+        vec![PAIR3_PRICE],
+        vec![block_time],
+        vec![1000],
+    ));
+}
diff --git a/pallets/automation-price/src/tests.rs b/pallets/automation-price/src/tests.rs
new file mode 100644
index 000000000..eac8a66ed
--- /dev/null
+++ b/pallets/automation-price/src/tests.rs
@@ -0,0 +1,1648 @@
+// This file is part of Ava Protocol.
+
+// Copyright (C) 2022 Ava Protocol
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
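+// The mock fixtures give each test three initialized pairs with known prices:
+// (ASSET1, ASSET2) on CHAIN1 at 1000, (ASSET2, ASSET3) on CHAIN2 at 5000, and
+// (ASSET1, ASSET3) on CHAIN2 at 10_000. A minimal usage sketch (hypothetical
+// test, mirroring the real tests below):
+//
+// #[test]
+// fn fixture_prices_are_visible() {
+//     new_test_ext(START_BLOCK_TIME).execute_with(|| {
+//         let sender = AccountId32::new(ALICE);
+//         setup_assets_and_prices(&sender, START_BLOCK_TIME as u128);
+//         let price = AutomationPrice::get_asset_price_data((
+//             CHAIN1.to_vec(),
+//             EXCHANGE1.to_vec(),
+//             (ASSET1.to_vec(), ASSET2.to_vec()),
+//         ))
+//         .expect("price set by the fixture");
+//         assert_eq!(price.value, 1000);
+//     })
+// }
+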
+
+use crate::{
+    mock::*, AccountStats, Action, AssetPayment, Error, StatType, Task, TaskIdList, TaskStats,
+    Tasks,
+};
+use pallet_xcmp_handler::InstructionSequence;
+
+use frame_support::{assert_noop, assert_ok, weights::Weight};
+use frame_system::{self, RawOrigin};
+use sp_runtime::{AccountId32, ArithmeticError};
+
+use staging_xcm::latest::{prelude::*, Junction::Parachain, Location};
+
+use sp_std::collections::btree_map::BTreeMap;
+
+pub const START_BLOCK_TIME: u64 = 33198768000 * 1_000;
+pub const START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND: u128 = 33198768000 + 3600;
+
+// Helper function to make asserting on events easier
+/// Assert that the given `event` exists.
+#[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
+pub fn assert_has_event(event: RuntimeEvent) {
+    let evts = System::events()
+        .into_iter()
+        .map(|evt| evt.event)
+        .collect::<Vec<_>>();
+    assert!(evts.iter().any(|record| record == &event))
+}
+
+// Helper function to make asserting on events easier
+/// Assert that the given `event` does not exist.
+#[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
+pub fn assert_no_event(event: RuntimeEvent) {
+    let evts = System::events()
+        .into_iter()
+        .map(|evt| evt.event)
+        .collect::<Vec<_>>();
+    assert!(evts.iter().all(|record| record != &event))
+}
+
+#[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
+pub fn assert_last_event(event: RuntimeEvent) {
+    assert_eq!(events().last().expect("events expected"), &event);
+}
+
+#[test]
+fn test_initialize_asset_works() {
+    new_test_ext(START_BLOCK_TIME).execute_with(|| {
+        let sender = AccountId32::new(ALICE);
+        assert_ok!(AutomationPrice::initialize_asset(
+            RawOrigin::Root.into(),
+            CHAIN1.to_vec(),
+            EXCHANGE1.to_vec(),
+            ASSET1.to_vec(),
+            ASSET2.to_vec(),
+            10,
+            vec!(sender)
+        ));
+
+        assert_has_event(RuntimeEvent::AutomationPrice(crate::Event::AssetCreated {
+            chain: CHAIN1.to_vec(),
+            exchange: EXCHANGE1.to_vec(),
+            asset1: ASSET1.to_vec(),
+            asset2: ASSET2.to_vec(),
+            decimal: 10,
+        }));
+    })
+}
+
+#[test]
+fn test_initialize_asset_reject_duplicate_asset() {
+    new_test_ext(START_BLOCK_TIME).execute_with(|| {
+        let sender = AccountId32::new(ALICE);
+        let _ = AutomationPrice::initialize_asset(
+            RawOrigin::Root.into(),
+            CHAIN1.to_vec(),
+            EXCHANGE1.to_vec(),
+            ASSET1.to_vec(),
+            ASSET2.to_vec(),
+            10,
+            vec![sender.clone()],
+        );
+
+        assert_noop!(
+            AutomationPrice::initialize_asset(
+                RawOrigin::Root.into(),
+                CHAIN1.to_vec(),
+                EXCHANGE1.to_vec(),
+                ASSET1.to_vec(),
+                ASSET2.to_vec(),
+                10,
+                vec!(sender)
+            ),
+            Error::<Test>::AssetAlreadyInitialized,
+        );
+    })
+}
+
+#[test]
+fn test_update_asset_prices() {
+    new_test_ext(START_BLOCK_TIME).execute_with(|| {
+        let sender = AccountId32::new(ALICE);
+
+        setup_asset(&sender, CHAIN1.to_vec());
+
+        assert_ok!(AutomationPrice::update_asset_prices(
+            RuntimeOrigin::signed(sender.clone()),
+            vec!(CHAIN1.to_vec()),
+            vec!(EXCHANGE1.to_vec()),
+            vec!(ASSET1.to_vec()),
+            vec!(ASSET2.to_vec()),
+            vec!(1005),
+            vec!(START_BLOCK_TIME as u128),
+            vec!(1),
+        ));
+
+        let p = AutomationPrice::get_asset_price_data((
+            CHAIN1.to_vec(),
+            EXCHANGE1.to_vec(),
+            (ASSET1.to_vec(), ASSET2.to_vec()),
+        ))
+        .expect("cannot get price");
+
+        assert_eq!(p.round, 1);
+        assert_eq!(p.value, 1005);
+
+        assert_has_event(RuntimeEvent::AutomationPrice(crate::Event::AssetUpdated {
+            owner_id: sender,
+            chain: CHAIN1.to_vec(),
+            exchange: EXCHANGE1.to_vec(),
+            asset1: ASSET1.to_vec(),
+            asset2: ASSET2.to_vec(),
+            price: 1005,
+        }));
+    })
+}
+
+#[test]
+fn test_update_asset_price_increase_round() {
+
new_test_ext(START_BLOCK_TIME).execute_with(|| { + let sender = AccountId32::new(ALICE); + + setup_asset(&sender, CHAIN1.to_vec()); + + assert_ok!(AutomationPrice::update_asset_prices( + RuntimeOrigin::signed(sender.clone()), + vec!(CHAIN1.to_vec()), + vec!(EXCHANGE1.to_vec()), + vec!(ASSET1.to_vec()), + vec!(ASSET2.to_vec()), + vec!(1005), + vec!(START_BLOCK_TIME as u128), + vec!(1), + )); + + let p = AutomationPrice::get_asset_price_data(( + CHAIN1.to_vec(), + EXCHANGE1.to_vec(), + (ASSET1.to_vec(), ASSET2.to_vec()), + )) + .expect("cannot get price"); + + assert_eq!(p.round, 1); + assert_eq!(p.updated_at, (START_BLOCK_TIME / 1000).into()); + + Timestamp::set_timestamp( + (START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND * 1000) + .try_into() + .unwrap(), + ); + assert_ok!(AutomationPrice::update_asset_prices( + RuntimeOrigin::signed(sender), + vec!(CHAIN1.to_vec()), + vec!(EXCHANGE1.to_vec()), + vec!(ASSET1.to_vec()), + vec!(ASSET2.to_vec()), + vec!(1005), + vec!(START_BLOCK_TIME as u128), + vec!(1), + )); + + let p = AutomationPrice::get_asset_price_data(( + CHAIN1.to_vec(), + EXCHANGE1.to_vec(), + (ASSET1.to_vec(), ASSET2.to_vec()), + )) + .expect("cannot get price"); + + assert_eq!(p.round, 2); + assert_eq!(p.updated_at, START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND); + }) +} + +#[test] +fn test_update_asset_prices_multi() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let sender = AccountId32::new(ALICE); + + setup_asset(&sender, CHAIN1.to_vec()); + setup_asset(&sender, CHAIN2.to_vec()); + + assert_ok!(AutomationPrice::update_asset_prices( + RuntimeOrigin::signed(sender.clone()), + vec!(CHAIN1.to_vec(), CHAIN2.to_vec()), + vec!(EXCHANGE1.to_vec(), EXCHANGE1.to_vec()), + vec!(ASSET1.to_vec(), ASSET1.to_vec()), + vec!(ASSET2.to_vec(), ASSET2.to_vec()), + vec!(1005, 1009), + vec!(START_BLOCK_TIME as u128, START_BLOCK_TIME as u128), + vec!(1, 2), + )); + + let p1 = AutomationPrice::get_asset_price_data(( + CHAIN1.to_vec(), + EXCHANGE1.to_vec(), + (ASSET1.to_vec(), ASSET2.to_vec()), + )) + .expect("cannot get price"); + + assert_eq!(p1.round, 1); + assert_eq!(p1.value, 1005); + + let p2 = AutomationPrice::get_asset_price_data(( + CHAIN2.to_vec(), + EXCHANGE1.to_vec(), + (ASSET1.to_vec(), ASSET2.to_vec()), + )) + .expect("cannot get price"); + + assert_eq!(p2.round, 2); + assert_eq!(p2.value, 1009); + + assert_has_event(RuntimeEvent::AutomationPrice(crate::Event::AssetUpdated { + owner_id: sender.clone(), + chain: CHAIN1.to_vec(), + exchange: EXCHANGE1.to_vec(), + asset1: ASSET1.to_vec(), + asset2: ASSET2.to_vec(), + price: 1005, + })); + + assert_has_event(RuntimeEvent::AutomationPrice(crate::Event::AssetUpdated { + owner_id: sender, + chain: CHAIN2.to_vec(), + exchange: EXCHANGE1.to_vec(), + asset1: ASSET1.to_vec(), + asset2: ASSET2.to_vec(), + price: 1009, + })); + }) +} + +#[test] +fn test_schedule_xcmp_task_ok() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let para_id: u32 = 1000; + let creator = AccountId32::new(ALICE); + let call: Vec = vec![2, 4, 5]; + let destination = Location::new(1, Parachain(para_id)); + + setup_asset(&creator, CHAIN1.to_vec()); + + get_xcmp_funds(creator.clone()); + assert_ok!(AutomationPrice::schedule_xcmp_task( + RuntimeOrigin::signed(creator.clone()), + CHAIN1.to_vec(), + EXCHANGE1.to_vec(), + ASSET1.to_vec(), + ASSET2.to_vec(), + START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND, + "gt".as_bytes().to_vec(), + vec!(100), + Box::new(destination.clone().into()), + Box::new(NATIVE_LOCATION.into()), + Box::new(AssetPayment { + asset_location: Location::new(0, 
Here).into(), + amount: MOCK_XCMP_FEE + }), + call.clone(), + Weight::from_parts(100_000, 0), + Weight::from_parts(200_000, 0) + )); + + // Upon schedule, task will be insert into 3 places + // 1. TaskRegistry: a fast hashmap look up using task id only + // 2. SortedTasksIndex: an ordering BTreeMap of the task, only task id and its price + // trigger + // 3. AccountTasks: hashmap to look up user task id + + let task_ids = get_task_ids_from_events(); + let task_id = task_ids.first().expect("task failed to schedule"); + + let task = AutomationPrice::get_task(&creator, &task_id).expect("missing task in registry"); + assert_eq!( + task.trigger_function, + "gt".as_bytes().to_vec(), + "created task has wrong trigger function" + ); + assert_eq!( + task.chain, + CHAIN1.to_vec(), + "created task has different chain id" + ); + assert_eq!( + task.asset_pair.0, ASSET1, + "created task has wrong asset pair" + ); + + assert_eq!(START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND, task.expired_at); + + // Ensure task is inserted into the right SortedIndex + + // Create second task, and make sure both are recorded + get_xcmp_funds(creator.clone()); + assert_ok!(AutomationPrice::schedule_xcmp_task( + RuntimeOrigin::signed(creator.clone()), + CHAIN1.to_vec(), + EXCHANGE1.to_vec(), + ASSET1.to_vec(), + ASSET2.to_vec(), + START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND, + "gt".as_bytes().to_vec(), + vec!(100), + Box::new(destination.into()), + Box::new(NATIVE_LOCATION.into()), + Box::new(AssetPayment { + asset_location: Location::new(0, Here).into(), + amount: MOCK_XCMP_FEE + }), + call, + Weight::from_parts(100_000, 0), + Weight::from_parts(200_000, 0) + )); + let task_ids2 = get_task_ids_from_events(); + let task_id2 = task_ids2.last().expect("task failed to schedule"); + assert_ne!(task_id, task_id2, "task id dup"); + + let sorted_task_index = AutomationPrice::get_sorted_tasks_index(( + CHAIN1.to_vec(), + EXCHANGE1.to_vec(), + (ASSET1.to_vec(), ASSET2.to_vec()), + "gt".as_bytes().to_vec(), + )) + .unwrap(); + let task_ids: Vec> = sorted_task_index.into_values().collect(); + assert_eq!( + task_ids, + vec!(vec!( + (creator.clone(), "1-0-4".as_bytes().to_vec()), + (creator.clone(), "1-0-7".as_bytes().to_vec()), + )) + ); + + // We had schedule 2 tasks so far, all two belong to the same account + assert_eq!( + 2, + AutomationPrice::get_task_stat(StatType::TotalTasksOverall).map_or(0, |v| v), + "total task count is incorrect" + ); + assert_eq!( + 2, + AutomationPrice::get_account_stat(creator, StatType::TotalTasksPerAccount) + .map_or(0, |v| v), + "total task count is incorrect" + ); + }) +} + +// Verify when user having not enough fund, we will fail with the right error code +#[test] +fn test_schedule_xcmp_task_fail_not_enough_balance() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let para_id: u32 = 1000; + let creator = AccountId32::new(ALICE); + let call: Vec = vec![2, 4, 5]; + let destination = Location::new(1, Parachain(para_id)); + + setup_asset(&creator, CHAIN1.to_vec()); + + get_xcmp_funds(creator.clone()); + assert_noop!( + AutomationPrice::schedule_xcmp_task( + RuntimeOrigin::signed(creator), + CHAIN1.to_vec(), + EXCHANGE1.to_vec(), + ASSET1.to_vec(), + ASSET2.to_vec(), + START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND, + "gt".as_bytes().to_vec(), + vec!(100), + Box::new(destination.into()), + Box::new(NATIVE_LOCATION.into()), + Box::new(AssetPayment { + asset_location: Location::new(0, Here).into(), + // Make a really high fee to simulate not enough balance + amount: MOCK_XCMP_FEE * 10_000 + }), + call, + 
Weight::from_parts(100_000, 0), + Weight::from_parts(200_000, 0) + ), + Error::::FeePaymentError, + ); + }) +} + +// Verify that upon scheduling a task, the task expiration will be inserted into +// SortedTasksByExpiration and shard by expired_at. +#[test] +fn test_schedule_put_task_to_expiration_queue() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let para_id: u32 = 1000; + let creator = AccountId32::new(ALICE); + let call: Vec = vec![2, 4, 5]; + let destination = Location::new(1, Parachain(para_id)); + + setup_assets_and_prices(&creator, START_BLOCK_TIME as u128); + // Lets setup 3 tasks + get_xcmp_funds(creator.clone()); + assert_ok!(AutomationPrice::schedule_xcmp_task( + RuntimeOrigin::signed(creator.clone()), + CHAIN1.to_vec(), + EXCHANGE1.to_vec(), + ASSET1.to_vec(), + ASSET2.to_vec(), + START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND, + "gt".as_bytes().to_vec(), + vec!(100), + Box::new(destination.into()), + Box::new(NATIVE_LOCATION.into()), + Box::new(AssetPayment { + asset_location: Location::new(0, Here).into(), + amount: MOCK_XCMP_FEE + }), + call, + Weight::from_parts(100_000, 0), + Weight::from_parts(200_000, 0) + )); + let task_ids = get_task_ids_from_events(); + let task_id = task_ids.last().expect("task failed to schedule"); + + let task_expiration_map = AutomationPrice::get_sorted_tasks_by_expiration(); + assert_eq!( + task_expiration_map + .get(&(START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND)) + .expect("missing task expiration shard"), + &(BTreeMap::from([(task_id.clone(), creator)])) + ); + }) +} + +// Verify that upon scheduling a task, the task expiration will be inserted into +// SortedTasksByExpiration and shard by expired_at. +// This test is similar as above test but we create multiple task wit different expiration to +// ensure all of them got to the right spot by expired_at time. 
+#[test] +fn test_schedule_put_task_to_expiration_queue_multi() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let para_id: u32 = 1000; + let creator1 = AccountId32::new(ALICE); + let creator2 = AccountId32::new(BOB); + let call: Vec = vec![2, 4, 5]; + let destination = Location::new(1, Parachain(para_id)); + + setup_asset(&creator1, CHAIN1.to_vec()); + + get_xcmp_funds(creator1.clone()); + assert_ok!(AutomationPrice::schedule_xcmp_task( + RuntimeOrigin::signed(creator1.clone()), + CHAIN1.to_vec(), + EXCHANGE1.to_vec(), + ASSET1.to_vec(), + ASSET2.to_vec(), + START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND, + "gt".as_bytes().to_vec(), + vec!(100), + Box::new(destination.clone().into()), + Box::new(NATIVE_LOCATION.into()), + Box::new(AssetPayment { + asset_location: Location::new(0, Here).into(), + amount: 100_000 + }), + call.clone(), + Weight::from_parts(100_000, 0), + Weight::from_parts(200_000, 0) + )); + let task_ids1 = get_task_ids_from_events(); + let task_id1 = task_ids1.last().expect("task failed to schedule"); + + get_xcmp_funds(creator2.clone()); + assert_ok!(AutomationPrice::schedule_xcmp_task( + RuntimeOrigin::signed(creator2.clone()), + CHAIN1.to_vec(), + EXCHANGE1.to_vec(), + ASSET1.to_vec(), + ASSET2.to_vec(), + START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND + 3600, + "lt".as_bytes().to_vec(), + vec!(100), + Box::new(destination.into()), + Box::new(NATIVE_LOCATION.into()), + Box::new(AssetPayment { + asset_location: Location::new(0, Here).into(), + amount: MOCK_XCMP_FEE + }), + call, + Weight::from_parts(100_000, 0), + Weight::from_parts(200_000, 0) + )); + let task_ids2 = get_task_ids_from_events(); + let task_id2 = task_ids2.last().expect("task failed to schedule"); + + let task_expiration_map = AutomationPrice::get_sorted_tasks_by_expiration(); + assert_eq!( + task_expiration_map + .get(&START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND) + .expect("missing task expiration shard"), + &BTreeMap::from([(task_id1.clone(), creator1)]), + ); + assert_eq!( + task_expiration_map + .get(&(START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND + 3600)) + .expect("missing task expiration shard"), + &BTreeMap::from([(task_id2.clone(), creator2)]), + ); + }) +} + +// Verify that after calling sweep, expired task will be removed from all relevant storage. 
Our +// stat is also decrease accordingly to the task removal +#[test] +fn test_sweep_expired_task_works() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let creator = AccountId32::new(ALICE); + let other_creator = AccountId32::new(BOB); + let para_id: u32 = 1000; + + setup_assets_and_prices(&creator, START_BLOCK_TIME as u128); + + let destination = Location::new(1, Parachain(para_id)); + let schedule_fee = Location::default(); + let execution_fee = AssetPayment { + asset_location: Location::new(1, Parachain(para_id)).into(), + amount: MOCK_XCMP_FEE, + }; + let encoded_call_weight = Weight::from_parts(100_000, 0); + let overall_weight = Weight::from_parts(200_000, 0); + + let expired_task_gen = 10; + let price_target1 = 2000; + for i in 0..expired_task_gen { + // schedule task that has expired + get_xcmp_funds(creator.clone()); + let task = Task:: { + owner_id: creator.clone(), + task_id: format!("123-0-{:?}", i).as_bytes().to_vec(), + chain: CHAIN1.to_vec(), + exchange: EXCHANGE1.to_vec(), + asset_pair: (ASSET1.to_vec(), ASSET2.to_vec()), + expired_at: START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND - 1800, + trigger_function: "gt".as_bytes().to_vec(), + trigger_params: vec![price_target1], + action: Action::XCMP { + destination: destination.clone(), + schedule_fee: schedule_fee.clone(), + execution_fee: execution_fee.clone(), + encoded_call: vec![1, 2, 3], + encoded_call_weight, + overall_weight, + schedule_as: None, + instruction_sequence: InstructionSequence::PayThroughRemoteDerivativeAccount, + }, + }; + assert_ok!(AutomationPrice::validate_and_schedule_task(task.clone())); + } + + // Now we set timestamp to a later point + Timestamp::set_timestamp(START_BLOCK_TIME.saturating_add(3_600_000_u64)); + + let price_target2 = 1000; + let task = Task:: { + owner_id: other_creator.clone(), + task_id: "123-1-1".as_bytes().to_vec(), + chain: CHAIN1.to_vec(), + exchange: EXCHANGE1.to_vec(), + asset_pair: (ASSET1.to_vec(), ASSET2.to_vec()), + expired_at: START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND + 3600, + trigger_function: "lt".as_bytes().to_vec(), + trigger_params: vec![price_target2], + action: Action::XCMP { + destination, + schedule_fee, + execution_fee, + encoded_call: vec![1, 2, 3], + encoded_call_weight, + overall_weight, + schedule_as: None, + instruction_sequence: InstructionSequence::PayThroughRemoteDerivativeAccount, + }, + }; + get_xcmp_funds(other_creator.clone()); + assert_ok!(AutomationPrice::validate_and_schedule_task(task)); + + assert_eq!( + u128::try_from(Tasks::::iter().count()).unwrap(), + expired_task_gen + 1 + ); + + assert_eq!( + // 10 task by creator, 1 task by other_creator + 11, + AutomationPrice::get_task_stat(StatType::TotalTasksOverall).map_or(0, |v| v), + "total task count is incorrect" + ); + assert_eq!( + 10, + AutomationPrice::get_account_stat(creator.clone(), StatType::TotalTasksPerAccount) + .map_or(0, |v| v), + "total task count is incorrect" + ); + assert_eq!( + 1, + AutomationPrice::get_account_stat( + other_creator.clone(), + StatType::TotalTasksPerAccount + ) + .map_or(0, |v| v), + "total task count is incorrect" + ); + + assert_eq!( + 10, + AutomationPrice::get_sorted_tasks_index(( + CHAIN1.to_vec(), + EXCHANGE1.to_vec(), + (ASSET1.to_vec(), ASSET2.to_vec()), + "gt".as_bytes().to_vec(), + )) + .map_or(0, |v| v.get(&price_target1).unwrap().iter().len()) + ); + + assert_eq!( + 1, + AutomationPrice::get_sorted_tasks_index(( + CHAIN1.to_vec(), + EXCHANGE1.to_vec(), + (ASSET1.to_vec(), ASSET2.to_vec()), + "lt".as_bytes().to_vec(), + )) + .map_or(0, |v| 
v.get(&price_target2).unwrap().iter().len()) + ); + + assert_eq!( + 10, + AutomationPrice::get_sorted_tasks_by_expiration() + .get(&(START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND - 1800)) + .expect("missing task expiration shard") + .len(), + ); + + // now we will sweep, passing a weight limit. In actualy code, this will be the + // remaining_weight in on_idle block + let remain_weight = 100_000_000_000; + AutomationPrice::sweep_expired_task(Weight::from_parts(remain_weight, 0)); + + assert_eq!( + AutomationPrice::get_sorted_tasks_by_expiration() + .get(&(START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND - 1800)), + None + ); + + for i in 0..expired_task_gen { + assert_has_event(RuntimeEvent::AutomationPrice(crate::Event::TaskSweep { + owner_id: creator.clone(), + task_id: format!("123-0-{:?}", i).as_bytes().to_vec(), + condition: crate::TaskCondition::AlreadyExpired { + expired_at: START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND - 1800, + now: START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND, + }, + })); + } + + // After sweep there should only one task remain in queue + assert_eq!(Tasks::::iter().count(), 1); + + // The task should be removed from the SortedTasksIndex + assert_eq!( + 0, + AutomationPrice::get_sorted_tasks_index(( + CHAIN1.to_vec(), + EXCHANGE1.to_vec(), + (ASSET1.to_vec(), ASSET2.to_vec()), + "gt".as_bytes().to_vec(), + )) + .expect("missing tasks sorted by price data") + .get(&price_target1) + .map_or(0, |v| v.iter().len()) + ); + // The task should be removed from the SortedTasksIndex + assert_eq!( + 1, + AutomationPrice::get_sorted_tasks_index(( + CHAIN1.to_vec(), + EXCHANGE1.to_vec(), + (ASSET1.to_vec(), ASSET2.to_vec()), + "lt".as_bytes().to_vec(), + )) + .expect("missing tasks sorted by price data") + .get(&price_target2) + .map_or(0, |v| v.iter().len()) + ); + + // The task stat should be changed + assert_eq!( + 1, + AutomationPrice::get_task_stat(StatType::TotalTasksOverall).map_or(0, |v| v), + "total task count is incorrect" + ); + assert_eq!( + 1, + AutomationPrice::get_account_stat(other_creator, StatType::TotalTasksPerAccount) + .map_or(0, |v| v), + "total task count is incorrect" + ); + }) +} + +// Test swap partially data, and leave the rest of sorted index remain intact +#[test] +fn test_sweep_expired_task_partially() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let creator = AccountId32::new(ALICE); + let _other_creator = AccountId32::new(BOB); + let para_id: u32 = 1000; + + setup_assets_and_prices(&creator, START_BLOCK_TIME as u128); + let destination = Location::new(1, Parachain(para_id)); + let schedule_fee = Location::default(); + let execution_fee = AssetPayment { + asset_location: Location::new(1, Parachain(para_id)).into(), + amount: MOCK_XCMP_FEE, + }; + let encoded_call_weight = Weight::from_parts(100_000, 0); + let overall_weight = Weight::from_parts(200_000, 0); + + let expired_task_gen = 11; + let price_target1 = 2000; + for i in 1..expired_task_gen { + // schedule task that has expired + get_xcmp_funds(creator.clone()); + let task = Task:: { + owner_id: creator.clone(), + task_id: format!("123-0-{:?}", i).as_bytes().to_vec(), + chain: CHAIN1.to_vec(), + exchange: EXCHANGE1.to_vec(), + asset_pair: (ASSET1.to_vec(), ASSET2.to_vec()), + expired_at: START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND + 10 + i * 10, + trigger_function: "gt".as_bytes().to_vec(), + trigger_params: vec![price_target1], + action: Action::XCMP { + destination: destination.clone(), + schedule_fee: schedule_fee.clone(), + execution_fee: execution_fee.clone(), + encoded_call: vec![1, 2, 3], + 
encoded_call_weight,
+                    overall_weight,
+                    schedule_as: None,
+                    instruction_sequence: InstructionSequence::PayThroughRemoteDerivativeAccount,
+                },
+            };
+            assert_ok!(AutomationPrice::validate_and_schedule_task(task.clone()));
+        }
+
+        // Now we set the timestamp to a later point
+        Timestamp::set_timestamp(START_BLOCK_TIME.saturating_add((3600 + 6 * 10) * 1000));
+
+        assert_eq!(
+            10,
+            AutomationPrice::get_sorted_tasks_index((
+                CHAIN1.to_vec(),
+                EXCHANGE1.to_vec(),
+                (ASSET1.to_vec(), ASSET2.to_vec()),
+                "gt".as_bytes().to_vec(),
+            ))
+            .map_or(0, |v| v.get(&price_target1).unwrap().iter().len())
+        );
+
+        assert_eq!(10, AutomationPrice::get_sorted_tasks_by_expiration().len());
+
+        // remaining_weight in on_idle block
+        let remain_weight = 100_000_000_000;
+        AutomationPrice::sweep_expired_task(Weight::from_parts(remain_weight, 0));
+
+        // The swept tasks should be removed from the SortedTasksIndex
+        assert_eq!(
+            5,
+            AutomationPrice::get_sorted_tasks_index((
+                CHAIN1.to_vec(),
+                EXCHANGE1.to_vec(),
+                (ASSET1.to_vec(), ASSET2.to_vec()),
+                "gt".as_bytes().to_vec(),
+            ))
+            .expect("missing tasks sorted by price data")
+            .get(&price_target1)
+            .map_or(0, |v| v.iter().len())
+        );
+
+        // these tasks all get swept
+        for i in 1..5 {
+            assert_eq!(
+                0,
+                AutomationPrice::get_sorted_tasks_by_expiration()
+                    .get(&(START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND + 10 + i * 10))
+                    .map_or(0, |v| v.len()),
+            );
+        }
+
+        // these slots remain untouched
+        for i in 6..10 {
+            assert_eq!(
+                1,
+                AutomationPrice::get_sorted_tasks_by_expiration()
+                    .get(&(START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND + 10 + i * 10))
+                    .map_or(0, |v| v.len()),
+            );
+        }
+    })
+}
+
+#[test]
+fn test_schedule_return_error_when_reaching_max_tasks_overall_limit() {
+    new_test_ext(START_BLOCK_TIME).execute_with(|| {
+        let para_id: u32 = 1000;
+        let creator = AccountId32::new(ALICE);
+        let call: Vec<u8> = vec![2, 4, 5];
+        let destination = Location::new(1, Parachain(para_id));
+
+        setup_asset(&creator, CHAIN1.to_vec());
+
+        TaskStats::<Test>::insert(StatType::TotalTasksOverall, 1_000_000_000);
+
+        assert_noop!(
+            AutomationPrice::schedule_xcmp_task(
+                RuntimeOrigin::signed(creator),
+                CHAIN1.to_vec(),
+                EXCHANGE1.to_vec(),
+                ASSET1.to_vec(),
+                ASSET2.to_vec(),
+                START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND + 3600,
+                "gt".as_bytes().to_vec(),
+                vec!(100),
+                Box::new(destination.into()),
+                Box::new(NATIVE_LOCATION.into()),
+                Box::new(AssetPayment {
+                    asset_location: Location::new(0, Here).into(),
+                    amount: 10000000000000
+                }),
+                call,
+                Weight::from_parts(100_000, 0),
+                Weight::from_parts(200_000, 0)
+            ),
+            Error::<Test>::MaxTasksReached,
+        );
+    })
+}
+
+#[test]
+fn test_schedule_return_error_when_reaching_max_account_tasks_limit() {
+    new_test_ext(START_BLOCK_TIME).execute_with(|| {
+        let para_id: u32 = 1000;
+        let creator = AccountId32::new(ALICE);
+        let call: Vec<u8> = vec![2, 4, 5];
+        let destination = Location::new(1, Parachain(para_id));
+
+        setup_asset(&creator, CHAIN1.to_vec());
+
+        AccountStats::<Test>::insert(creator.clone(), StatType::TotalTasksPerAccount, 1_000);
+
+        assert_noop!(
+            AutomationPrice::schedule_xcmp_task(
+                RuntimeOrigin::signed(creator),
+                CHAIN1.to_vec(),
+                EXCHANGE1.to_vec(),
+                ASSET1.to_vec(),
+                ASSET2.to_vec(),
+                START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND,
+                "gt".as_bytes().to_vec(),
+                vec!(100),
+                Box::new(destination.into()),
+                Box::new(NATIVE_LOCATION.into()),
+                Box::new(AssetPayment {
+                    asset_location: Location::new(0, Here).into(),
+                    amount: MOCK_XCMP_FEE
+                }),
+                call,
+                Weight::from_parts(100_000, 0),
+                Weight::from_parts(200_000, 0)
+            ),
Error::<Test>::MaxTasksPerAccountReached,
+        );
+    })
+}
+
+// Test that when the price moves, the TaskQueue is populated with the right task ids
+//
+// In this test we first set up 3 tasks for 3 pairs:
+//   task1 for pair1
+//   task2 for pair2
+//   task3 for pair3
+//
+// When we adjust the pair prices, we purposely leave task2's target price unmatched,
+// so we can verify that task1 and task3 are moved into the queue accordingly.
+//
+// Finally we schedule a new task4 for pair3 whose target price does match, and verify
+// that task4 is moved into the TaskQueue while task2 still is not.
+//
+// The purpose of this test is to simulate a few tasks being triggered according to their
+// price movements. We also verify that a task whose target price never matches will
+// never get triggered.
+#[test]
+fn test_shift_tasks_movement_through_price_changes() {
+    new_test_ext(START_BLOCK_TIME).execute_with(|| {
+        // TODO: Setup fund once we add fund check and weight
+        let para_id: u32 = 1000;
+        let creator = AccountId32::new(ALICE);
+        let call: Vec<u8> = vec![2, 4, 5];
+        let destination = Location::new(1, Parachain(para_id));
+
+        setup_assets_and_prices(&creator, START_BLOCK_TIME as u128);
+
+        let base_price = 10_000_u128;
+
+        // Let's set up 3 tasks
+        get_xcmp_funds(creator.clone());
+        assert_ok!(AutomationPrice::schedule_xcmp_task(
+            RuntimeOrigin::signed(creator.clone()),
+            CHAIN1.to_vec(),
+            EXCHANGE1.to_vec(),
+            ASSET1.to_vec(),
+            ASSET2.to_vec(),
+            START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND,
+            "gt".as_bytes().to_vec(),
+            vec!(base_price + 1000),
+            Box::new(destination.clone().into()),
+            Box::new(NATIVE_LOCATION.into()),
+            Box::new(AssetPayment {
+                asset_location: Location::new(0, Here).into(),
+                amount: MOCK_XCMP_FEE
+            }),
+            call.clone(),
+            Weight::from_parts(100_000, 0),
+            Weight::from_parts(200_000, 0)
+        ));
+
+        get_xcmp_funds(creator.clone());
+        assert_ok!(AutomationPrice::schedule_xcmp_task(
+            RuntimeOrigin::signed(creator.clone()),
+            CHAIN2.to_vec(),
+            EXCHANGE1.to_vec(),
+            ASSET2.to_vec(),
+            ASSET3.to_vec(),
+            START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND,
+            "gt".as_bytes().to_vec(),
+            vec!(base_price + 900),
+            Box::new(destination.clone().into()),
+            Box::new(NATIVE_LOCATION.into()),
+            Box::new(AssetPayment {
+                asset_location: Location::new(0, Here).into(),
+                amount: MOCK_XCMP_FEE
+            }),
+            call.clone(),
+            Weight::from_parts(100_000, 0),
+            Weight::from_parts(200_000, 0)
+        ));
+
+        get_xcmp_funds(creator.clone());
+        assert_ok!(AutomationPrice::schedule_xcmp_task(
+            RuntimeOrigin::signed(creator.clone()),
+            CHAIN2.to_vec(),
+            EXCHANGE1.to_vec(),
+            ASSET1.to_vec(),
+            ASSET3.to_vec(),
+            START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND + 6000,
+            "gt".as_bytes().to_vec(),
+            vec!(base_price + 1000),
+            Box::new(destination.clone().into()),
+            Box::new(NATIVE_LOCATION.into()),
+            Box::new(AssetPayment {
+                asset_location: Location::new(0, Here).into(),
+                amount: MOCK_XCMP_FEE
+            }),
+            call.clone(),
+            Weight::from_parts(100_000, 0),
+            Weight::from_parts(200_000, 0)
+        ));
+
+        let task_ids = get_task_ids_from_events();
+        let task_id1 = task_ids.get(task_ids.len().wrapping_sub(3)).unwrap();
+        // let _task_id2 = task_ids.get(task_ids.len().wrapping_sub(2)).unwrap();
+        let task_id3 = task_ids.get(task_ids.len().wrapping_sub(1)).unwrap();
+
+        // At this moment our task queue is empty.
+        // There are scheduled tasks, but none in the queue yet, because shift_tasks
+        // has not run yet
+        assert!(AutomationPrice::get_task_queue().is_empty());
+
+        // shift_tasks moves tasks from the registry to the queue
+        // At this moment, the price doesn't match the target, so
there is no change in our tasks + AutomationPrice::shift_tasks(Weight::from_parts(1_000_000_000, 0)); + assert!(AutomationPrice::get_task_queue().is_empty()); + let sorted_task_index = AutomationPrice::get_sorted_tasks_index(( + CHAIN1.to_vec(), + EXCHANGE1.to_vec(), + (ASSET1.to_vec(), ASSET2.to_vec()), + "gt".as_bytes().to_vec(), + )); + assert_eq!(sorted_task_index.map_or_else(|| 0, |x| x.len()), 1); + + // now we change price of pair1 to higher than its target price, while keeping pair2/pair3 low enough, + // only task_id1 will be moved to the queue. + // The target price for those respectively tasks are 10100, 10900, 102000 in their pair + // Therefore after running this price update, first task task_id1 are moved into TaskQueue + let new_pair_1_price: u128 = base_price + 2000; + let new_pair_2_price: u128 = 10_u128; + let mut new_pair_3_price: u128 = 300_u128; + assert_ok!(AutomationPrice::update_asset_prices( + RuntimeOrigin::signed(creator.clone()), + vec!(CHAIN1.to_vec(), CHAIN2.to_vec(), CHAIN2.to_vec()), + vec!(EXCHANGE1.to_vec(), EXCHANGE1.to_vec(), EXCHANGE1.to_vec()), + vec!(ASSET1.to_vec(), ASSET2.to_vec(), ASSET1.to_vec()), + vec!(ASSET2.to_vec(), ASSET3.to_vec(), ASSET3.to_vec()), + vec!(new_pair_1_price, new_pair_2_price, new_pair_3_price), + vec!( + START_BLOCK_TIME as u128, + START_BLOCK_TIME as u128, + START_BLOCK_TIME as u128 + ), + vec!(1, 2, 3), + )); + AutomationPrice::shift_tasks(Weight::from_parts(1_000_000_000, 0)); + assert_eq!( + AutomationPrice::get_task_queue(), + vec![(creator.clone(), task_id1.clone())] + ); + // The task are removed from SortedTasksIndex into the TaskQueue, therefore their length + // decrease to 0 + assert_eq!( + AutomationPrice::get_sorted_tasks_index(( + CHAIN1.to_vec(), + EXCHANGE1.to_vec(), + (ASSET1.to_vec(), ASSET2.to_vec()), + "gt".as_bytes().to_vec(), + )) + .map_or_else(|| 0, |x| x.len()), + 0 + ); + + // now we move target price of pair3 to higher than its target, and will observe that its + // task will be moved to TaskQueue too. + new_pair_3_price = base_price + 2000; + let _ = AutomationPrice::update_asset_prices( + RuntimeOrigin::signed(creator.clone()), + vec![CHAIN2.to_vec()], + vec![EXCHANGE1.to_vec()], + vec![ASSET1.to_vec()], + vec![ASSET3.to_vec()], + vec![new_pair_3_price], + vec![START_BLOCK_TIME as u128], + vec![4], + ); + AutomationPrice::shift_tasks(Weight::from_parts(1_000_000_000, 0)); + assert_eq!( + AutomationPrice::get_task_queue(), + vec![ + (creator.clone(), task_id1.clone()), + (creator.clone(), task_id3.clone()) + ] + ); + // The task are removed from SortedTasksIndex into the TaskQueue, therefore their length + // decrease to 0 + assert_eq!( + AutomationPrice::get_sorted_tasks_index(( + CHAIN2.to_vec(), + EXCHANGE1.to_vec(), + (ASSET1.to_vec(), ASSET3.to_vec()), + "gt".as_bytes().to_vec(), + )) + .map_or_else(|| 0, |x| x.len()), + 0 + ); + + // Now, if a new task come up, and its price target matches the existing price, they will + // be trigger too. 
+ get_xcmp_funds(creator.clone()); + assert_ok!(AutomationPrice::schedule_xcmp_task( + RuntimeOrigin::signed(creator.clone()), + CHAIN2.to_vec(), + EXCHANGE1.to_vec(), + ASSET2.to_vec(), + ASSET3.to_vec(), + START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND, + "lt".as_bytes().to_vec(), + // price for this asset is 10 in our last update + vec!(20), + Box::new(destination.into()), + Box::new(NATIVE_LOCATION.into()), + Box::new(AssetPayment { + asset_location: Location::new(0, Here).into(), + amount: MOCK_XCMP_FEE + }), + call, + Weight::from_parts(100_000, 0), + Weight::from_parts(200_000, 0) + )); + // The task is now on the SortedTasksIndex + assert_eq!( + AutomationPrice::get_sorted_tasks_index(( + CHAIN2.to_vec(), + EXCHANGE1.to_vec(), + (ASSET2.to_vec(), ASSET3.to_vec()), + "lt".as_bytes().to_vec(), + )) + .map_or_else(|| 0, |x| x.len()), + 1 + ); + + AutomationPrice::shift_tasks(Weight::from_parts(1_000_000_000, 0)); + let task_id4 = { + let task_ids = get_task_ids_from_events(); + task_ids.last().unwrap().clone() + }; + + // Now the task is again, moved into the queue and be removed from SortedTasksIndex + assert_eq!( + AutomationPrice::get_task_queue(), + vec![ + (creator.clone(), task_id1.clone()), + (creator.clone(), task_id3.clone()), + (creator, task_id4) + ] + ); + assert_eq!( + AutomationPrice::get_sorted_tasks_index(( + CHAIN2.to_vec(), + EXCHANGE1.to_vec(), + (ASSET2.to_vec(), ASSET3.to_vec()), + "lt".as_bytes().to_vec(), + )) + .map_or_else(|| 0, |x| x.len()), + 0 + ); + }) +} + +// the logic around > or < using include/exclude range to include bound or not, it can be subtle +// and error prone to human mistake so this test exist to make sure we catch that edge case. +#[test] +fn test_gt_task_not_run_when_asset_price_equal_target_price() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + // TODO: Setup fund once we add fund check and weight + let para_id: u32 = 1000; + let creator = AccountId32::new(ALICE); + let call: Vec = vec![2, 4, 5]; + let destination = Location::new(1, Parachain(para_id)); + + setup_assets_and_prices(&creator, START_BLOCK_TIME as u128); + + let base_price = 1_000_u128; + + get_xcmp_funds(creator.clone()); + assert_ok!(AutomationPrice::schedule_xcmp_task( + RuntimeOrigin::signed(creator), + CHAIN1.to_vec(), + EXCHANGE1.to_vec(), + ASSET1.to_vec(), + ASSET2.to_vec(), + START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND, + "gt".as_bytes().to_vec(), + vec!(base_price), + Box::new(destination.into()), + Box::new(NATIVE_LOCATION.into()), + Box::new(AssetPayment { + asset_location: Location::new(0, Here).into(), + amount: 100_000 + }), + call, + Weight::from_parts(100_000, 0), + Weight::from_parts(200_000, 0) + )); + + AutomationPrice::shift_tasks(Weight::from_parts(1_000_000_000, 0)); + // Task shouldn't be move to task queue to trigger, and the task queue should be empty + assert!(AutomationPrice::get_task_queue().is_empty()); + + let sorted_task_index = AutomationPrice::get_sorted_tasks_index(( + CHAIN1.to_vec(), + EXCHANGE1.to_vec(), + (ASSET1.to_vec(), ASSET2.to_vec()), + "gt".as_bytes().to_vec(), + )); + assert_eq!(1, sorted_task_index.map_or_else(|| 0, |x| x.len())); + }) +} + +#[test] +fn test_emit_event_when_execute_tasks() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let creator = AccountId32::new(ALICE); + let para_id: u32 = 1000; + + setup_assets_and_prices(&creator, START_BLOCK_TIME as u128); + + let destination = Location::new(1, Parachain(para_id)); + let schedule_fee = Location::default(); + let execution_fee = AssetPayment { + asset_location: 
Location::new(1, Parachain(para_id)).into(), + amount: MOCK_XCMP_FEE, + }; + let encoded_call_weight = Weight::from_parts(100_000, 0); + let overall_weight = Weight::from_parts(200_000, 0); + + get_xcmp_funds(creator.clone()); + let task = Task:: { + owner_id: creator, + task_id: "123-0-1".as_bytes().to_vec(), + chain: CHAIN1.to_vec(), + exchange: EXCHANGE1.to_vec(), + asset_pair: (ASSET1.to_vec(), ASSET2.to_vec()), + expired_at: (START_BLOCK_TIME + 10000) as u128, + trigger_function: "gt".as_bytes().to_vec(), + trigger_params: vec![123], + action: Action::XCMP { + destination, + schedule_fee, + execution_fee, + encoded_call: vec![1, 2, 3], + encoded_call_weight, + overall_weight, + schedule_as: None, + instruction_sequence: InstructionSequence::PayThroughRemoteDerivativeAccount, + }, + }; + + let _ = AutomationPrice::validate_and_schedule_task(task.clone()); + + AutomationPrice::run_tasks( + vec![(task.owner_id.clone(), task.task_id.clone())], + 100_000_000_000.into(), + ); + + assert_has_event(RuntimeEvent::AutomationPrice(crate::Event::TaskTriggered { + owner_id: task.owner_id.clone(), + task_id: task.task_id.clone(), + condition: crate::TaskCondition::TargetPriceMatched { + chain: task.chain.clone(), + exchange: task.exchange.clone(), + asset_pair: task.asset_pair.clone(), + price: 1000_u128, + }, + })); + + assert_has_event(RuntimeEvent::AutomationPrice(crate::Event::TaskExecuted { + owner_id: task.owner_id.clone(), + task_id: task.task_id, + })); + }) +} + +#[test] +fn test_decrease_task_count_when_execute_tasks() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let creator1 = AccountId32::new(ALICE); + let creator2 = AccountId32::new(BOB); + let para_id: u32 = 1000; + + setup_assets_and_prices(&creator1, START_BLOCK_TIME as u128); + + let destination = Location::new(1, Parachain(para_id)); + let schedule_fee = Location::default(); + let execution_fee = AssetPayment { + asset_location: Location::new(1, Parachain(para_id)).into(), + amount: MOCK_XCMP_FEE, + }; + let encoded_call_weight = Weight::from_parts(100_000, 0); + let overall_weight = Weight::from_parts(200_000, 0); + + get_xcmp_funds(creator1.clone()); + let task1 = Task:: { + owner_id: creator1.clone(), + task_id: "123-0-1".as_bytes().to_vec(), + chain: CHAIN1.to_vec(), + exchange: EXCHANGE1.to_vec(), + asset_pair: (ASSET1.to_vec(), ASSET2.to_vec()), + expired_at: (START_BLOCK_TIME + 10000) as u128, + trigger_function: "gt".as_bytes().to_vec(), + trigger_params: vec![123], + action: Action::XCMP { + destination: destination.clone(), + schedule_fee: schedule_fee.clone(), + execution_fee: execution_fee.clone(), + encoded_call: vec![1, 2, 3], + encoded_call_weight, + overall_weight, + schedule_as: None, + instruction_sequence: InstructionSequence::PayThroughRemoteDerivativeAccount, + }, + }; + + get_xcmp_funds(creator2.clone()); + let task2 = Task:: { + owner_id: creator2.clone(), + task_id: "123-1-1".as_bytes().to_vec(), + chain: CHAIN1.to_vec(), + exchange: EXCHANGE1.to_vec(), + asset_pair: (ASSET1.to_vec(), ASSET2.to_vec()), + expired_at: (START_BLOCK_TIME + 10000) as u128, + trigger_function: "gt".as_bytes().to_vec(), + trigger_params: vec![123], + action: Action::XCMP { + destination, + schedule_fee, + execution_fee, + encoded_call: vec![1, 2, 3], + encoded_call_weight, + overall_weight, + schedule_as: None, + instruction_sequence: InstructionSequence::PayThroughRemoteDerivativeAccount, + }, + }; + + let _ = AutomationPrice::validate_and_schedule_task(task1.clone()); + let _ = 
AutomationPrice::validate_and_schedule_task(task2); + + assert_eq!( + 2, + AutomationPrice::get_task_stat(StatType::TotalTasksOverall).map_or(0, |v| v), + "total task count is wrong" + ); + assert_eq!( + 1, + AutomationPrice::get_account_stat(creator1.clone(), StatType::TotalTasksPerAccount) + .map_or(0, |v| v), + "total task count is wrong" + ); + assert_eq!( + 1, + AutomationPrice::get_account_stat(creator2, StatType::TotalTasksPerAccount) + .map_or(0, |v| v), + "total task count is wrong" + ); + + AutomationPrice::run_tasks( + vec![(task1.owner_id.clone(), task1.task_id)], + 100_000_000_000.into(), + ); + + assert_eq!( + 1, + AutomationPrice::get_task_stat(StatType::TotalTasksOverall).map_or(0, |v| v), + "total task count is wrong" + ); + assert_eq!( + 0, + AutomationPrice::get_account_stat(creator1, StatType::TotalTasksPerAccount) + .map_or(0, |v| v), + "total task count of creator1 is wrong" + ); + }) +} + +// when running a task, if the task is already expired, the execution engine won't run the task, +// instead an even TaskExpired is emiited +#[test] +fn test_expired_task_not_run() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let creator = AccountId32::new(ALICE); + let para_id: u32 = 1000; + + let destination = Location::new(1, Parachain(para_id)); + let schedule_fee = Location::default(); + let execution_fee = AssetPayment { + asset_location: Location::new(1, Parachain(para_id)).into(), + amount: MOCK_XCMP_FEE, + }; + let encoded_call_weight = Weight::from_parts(100_000, 0); + let overall_weight = Weight::from_parts(200_000, 0); + + get_xcmp_funds(creator.clone()); + let task = Task:: { + owner_id: creator, + task_id: "123-0-1".as_bytes().to_vec(), + chain: CHAIN1.to_vec(), + exchange: EXCHANGE1.to_vec(), + asset_pair: (ASSET1.to_vec(), ASSET2.to_vec()), + expired_at: START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND, + trigger_function: "gt".as_bytes().to_vec(), + trigger_params: vec![123], + action: Action::XCMP { + destination, + schedule_fee, + execution_fee, + encoded_call: vec![1, 2, 3], + encoded_call_weight, + overall_weight, + schedule_as: None, + instruction_sequence: InstructionSequence::PayThroughRemoteDerivativeAccount, + }, + }; + + let _ = AutomationPrice::validate_and_schedule_task(task.clone()); + + // Moving the clock to simulate the task expiration + Timestamp::set_timestamp(START_BLOCK_TIME.saturating_add(7_200_000_u64)); + AutomationPrice::run_tasks( + vec![(task.owner_id.clone(), task.task_id.clone())], + 100_000_000_000.into(), + ); + + assert_no_event(RuntimeEvent::AutomationPrice(crate::Event::TaskTriggered { + owner_id: task.owner_id.clone(), + task_id: task.task_id.clone(), + condition: crate::TaskCondition::TargetPriceMatched { + chain: task.chain.clone(), + exchange: task.exchange.clone(), + asset_pair: task.asset_pair.clone(), + price: 1000_u128, + }, + })); + + assert_no_event(RuntimeEvent::AutomationPrice(crate::Event::TaskExecuted { + owner_id: task.owner_id.clone(), + task_id: task.task_id.clone(), + })); + + assert_last_event(RuntimeEvent::AutomationPrice(crate::Event::TaskExpired { + owner_id: task.owner_id.clone(), + task_id: task.task_id.clone(), + condition: crate::TaskCondition::AlreadyExpired { + expired_at: task.expired_at, + now: START_BLOCK_TIME + .saturating_add(7_200_000_u64) + .checked_div(1000) + .ok_or(ArithmeticError::Overflow) + .expect("blocktime is out of range") as u128, + }, + })); + }) +} + +// when running a task, if the price has been moved against the target price, rendering the target +// price condition not match 
anymore. we will skip run +#[test] +fn test_price_move_against_target_price_skip_run() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let creator = AccountId32::new(ALICE); + let para_id: u32 = 1000; + + setup_assets_and_prices(&creator, START_BLOCK_TIME as u128); + + get_xcmp_funds(creator.clone()); + let destination = Location::new(1, Parachain(para_id)); + let schedule_fee = Location::default(); + let execution_fee = AssetPayment { + asset_location: Location::new(1, Parachain(para_id)).into(), + amount: MOCK_XCMP_FEE, + }; + let encoded_call_weight = Weight::from_parts(100_000, 0); + let overall_weight = Weight::from_parts(200_000, 0); + + let task = Task:: { + owner_id: creator, + task_id: "123-0-1".as_bytes().to_vec(), + chain: CHAIN1.to_vec(), + exchange: EXCHANGE1.to_vec(), + asset_pair: (ASSET1.to_vec(), ASSET2.to_vec()), + expired_at: START_BLOCK_TIME + .checked_div(1000) + .map_or(10000000_u128, |v| v.into()) + .saturating_add(100), + trigger_function: "gt".as_bytes().to_vec(), + // This asset price is set to 1000 + // The task is config to run when price > 2000, and we invoked it directly + // so and we will observe that task won't run due to price doesn't match + // the condition + trigger_params: vec![2000], + action: Action::XCMP { + destination, + schedule_fee, + execution_fee, + encoded_call: vec![1, 2, 3], + encoded_call_weight, + overall_weight, + schedule_as: None, + instruction_sequence: InstructionSequence::PayThroughRemoteDerivativeAccount, + }, + }; + + let _ = AutomationPrice::validate_and_schedule_task(task.clone()); + + AutomationPrice::run_tasks( + vec![(task.owner_id.clone(), task.task_id.clone())], + 100_000_000_000.into(), + ); + + assert_no_event(RuntimeEvent::AutomationPrice(crate::Event::TaskTriggered { + owner_id: task.owner_id.clone(), + task_id: task.task_id.clone(), + condition: crate::TaskCondition::TargetPriceMatched { + chain: task.chain.clone(), + exchange: task.exchange.clone(), + asset_pair: task.asset_pair.clone(), + price: 1000_u128, + }, + })); + + assert_no_event(RuntimeEvent::AutomationPrice(crate::Event::TaskExecuted { + owner_id: task.owner_id.clone(), + task_id: task.task_id.clone(), + })); + + assert_last_event(RuntimeEvent::AutomationPrice( + crate::Event::PriceAlreadyMoved { + owner_id: task.owner_id.clone(), + task_id: task.task_id, + condition: crate::TaskCondition::PriceAlreadyMoved { + chain: CHAIN1.to_vec(), + exchange: EXCHANGE1.to_vec(), + asset_pair: (ASSET1.to_vec(), ASSET2.to_vec()), + price: 1000_u128, + target_price: 2000_u128, + }, + }, + )); + }) +} + +// When canceling, task is removed from 3 places: +#[test] +fn test_cancel_task_works() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let creator = AccountId32::new(ALICE); + let para_id: u32 = 1000; + + let destination = Location::new(1, Parachain(para_id)); + let schedule_fee = Location::default(); + let execution_fee = AssetPayment { + asset_location: Location::new(1, Parachain(para_id)).into(), + amount: MOCK_XCMP_FEE, + }; + let encoded_call_weight = Weight::from_parts(100_000, 0); + let overall_weight = Weight::from_parts(200_000, 0); + + get_xcmp_funds(creator.clone()); + let task = Task:: { + owner_id: creator, + task_id: "123-0-1".as_bytes().to_vec(), + chain: CHAIN1.to_vec(), + exchange: EXCHANGE1.to_vec(), + asset_pair: (ASSET1.to_vec(), ASSET2.to_vec()), + expired_at: START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND, + trigger_function: "gt".as_bytes().to_vec(), + trigger_params: vec![123], + action: Action::XCMP { + destination, + schedule_fee, + 
execution_fee, + encoded_call: vec![1, 2, 3], + encoded_call_weight, + overall_weight, + schedule_as: None, + instruction_sequence: InstructionSequence::PayThroughRemoteDerivativeAccount, + }, + }; + let _ = AutomationPrice::validate_and_schedule_task(task.clone()); + + let _ = AutomationPrice::cancel_task( + RuntimeOrigin::signed(task.owner_id.clone()), + task.task_id.clone(), + ); + + assert_has_event(RuntimeEvent::AutomationPrice(crate::Event::TaskCancelled { + owner_id: task.owner_id.clone(), + task_id: task.task_id, + })); + }) +} + +#[test] +fn test_delete_asset_ok() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let sender = AccountId32::new(ALICE); + let key = ( + CHAIN1.to_vec(), + EXCHANGE1.to_vec(), + (ASSET1.to_vec(), ASSET2.to_vec()), + ); + + setup_asset(&sender, CHAIN1.to_vec()); + let _ = AutomationPrice::update_asset_prices( + RuntimeOrigin::signed(sender), + vec![CHAIN1.to_vec()], + vec![EXCHANGE1.to_vec()], + vec![ASSET1.to_vec()], + vec![ASSET2.to_vec()], + vec![6789_u128], + vec![START_BLOCK_TIME as u128], + vec![4], + ); + + assert!(AutomationPrice::get_asset_registry_info(&key).is_some()); + assert!(AutomationPrice::get_asset_price_data(&key).is_some()); + + // Now we delete the asset; all the relevant asset metadata and price data should be deleted + let _ = AutomationPrice::delete_asset( + RawOrigin::Root.into(), + CHAIN1.to_vec(), + EXCHANGE1.to_vec(), + ASSET1.to_vec(), + ASSET2.to_vec(), + ); + + assert!(AutomationPrice::get_asset_registry_info(&key).is_none()); + assert!(AutomationPrice::get_asset_price_data(&key).is_none()); + + assert_has_event(RuntimeEvent::AutomationPrice(crate::Event::AssetDeleted { + chain: CHAIN1.to_vec(), + exchange: EXCHANGE1.to_vec(), + asset1: ASSET1.to_vec(), + asset2: ASSET2.to_vec(), + })); + }) +} diff --git a/pallets/automation-price/src/trigger.rs b/pallets/automation-price/src/trigger.rs new file mode 100644 index 000000000..ab7affe09 --- /dev/null +++ b/pallets/automation-price/src/trigger.rs @@ -0,0 +1,64 @@ +// This file is part of Ava Protocol. + +// Copyright (C) 2022 Ava Protocol +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
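For orientation before reading trigger.rs below: a task's trigger function is the byte string "gt" or "lt", compared against the latest asset price. A minimal standalone sketch of that matching rule, with illustrative names that are not part of the pallet:

    // Sketch only: mirrors the gt/lt rule of `is_price_condition_match` below.
    // A task is runnable under "gt" when the current price is strictly above its
    // target, and otherwise (the "lt" rule) when it is strictly below.
    fn is_match(trigger_function: &[u8], current_price: u128, target_price: u128) -> bool {
        if trigger_function == b"gt" {
            current_price > target_price
        } else {
            current_price < target_price
        }
    }

    fn main() {
        assert!(is_match(b"gt", 100, 50)); // current 100 above target 50: runnable
        assert!(!is_match(b"gt", 100, 150)); // current below target: not runnable
        assert!(is_match(b"lt", 100, 150));
    }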
+use crate::{Config, PriceData, Task}; + +use sp_std::ops::{ + Bound, + Bound::{Excluded, Included}, +}; + +pub const TRIGGER_FUNC_GT: &[u8] = "gt".as_bytes(); +pub const TRIGGER_FUNC_LT: &[u8] = "lt".as_bytes(); + +pub trait PriceConditionMatch { + fn is_price_condition_match(&self, price: &PriceData) -> bool; +} + +impl<T: Config> PriceConditionMatch for Task<T> { + /// Check that the task's trigger condition matches against the current price of the asset. + /// + /// # Arguments + /// + /// * `price` - the current price data of the asset to check against + fn is_price_condition_match(&self, price: &PriceData) -> bool { + // For `gt`, trigger when the current price of the asset > the task's target price. + // Example: + // - current price: 100, the task has target price: 50 -> runnable + // - current price: 100, the task has target price: 150 -> not runnable + // + + if self.trigger_function == TRIGGER_FUNC_GT.to_vec() { + price.value > self.trigger_params[0] + } else { + price.value < self.trigger_params[0] + } + } +} + +/// Given a condition and the current price, generate the range of target prices that match the condition +pub fn range_by_trigger_func( + trigger_func: &[u8], + current_price: &PriceData, +) -> (Bound<u128>, Bound<u128>) { + // E.g. a sell order: sell when price > target + if trigger_func == TRIGGER_FUNC_GT { + (Excluded(u128::MIN), Excluded(current_price.value)) + } else { + // E.g. a buy order: buy when price < target + (Included(current_price.value), Excluded(u128::MAX)) + } +} diff --git a/pallets/automation-price/src/types.rs b/pallets/automation-price/src/types.rs new file mode 100644 index 000000000..309036688 --- /dev/null +++ b/pallets/automation-price/src/types.rs @@ -0,0 +1,69 @@ +// This file is part of Ava Protocol. + +// Copyright (C) 2022 Ava Protocol +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +use crate::{weights::WeightInfo, Config, InstructionSequence}; + +use frame_support::pallet_prelude::*; + +use sp_std::prelude::*; + +use staging_xcm::{latest::prelude::*, VersionedLocation}; + +/// The struct that stores execution payment for a task. +#[derive(Debug, Encode, Eq, PartialEq, Decode, TypeInfo, Clone)] +pub struct AssetPayment { + pub asset_location: VersionedLocation, + pub amount: u128, +} + +/// The enum that stores all action specific data. +#[derive(Clone, Debug, Eq, PartialEq, Encode, Decode, TypeInfo)] +#[scale_info(skip_type_params(T))] +pub enum Action<T: Config> { + XCMP { + destination: Location, + schedule_fee: Location, + execution_fee: AssetPayment, + encoded_call: Vec<u8>, + encoded_call_weight: Weight, + overall_weight: Weight, + schedule_as: Option<T::AccountId>, + instruction_sequence: InstructionSequence, + }, +} + +impl<T: Config> Action<T> { + pub fn execution_weight(&self) -> Result<u64, DispatchError> { + let weight = match self { + Action::XCMP { .. } => <T as Config>::WeightInfo::run_xcmp_task(), + }; + Ok(weight.ref_time()) + } + + pub fn schedule_fee_location(&self) -> Location { + match self { + Action::XCMP { schedule_fee, ..
} => (*schedule_fee).clone(), + } + } +} + +/// The enum represents the type of metric we track +#[derive(Clone, Debug, Eq, PartialEq, Encode, Decode, TypeInfo)] +#[scale_info(skip_type_params(T))] +pub enum StatType { + TotalTasksOverall, + TotalTasksPerAccount, +} diff --git a/pallets/automation-price/src/weights.rs b/pallets/automation-price/src/weights.rs new file mode 100644 index 000000000..e0bb42a21 --- /dev/null +++ b/pallets/automation-price/src/weights.rs @@ -0,0 +1,261 @@ +// This file is part of Ava Protocol. + +// Copyright (C) 2022 Ava Protocol +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <https://www.gnu.org/licenses/>. + +//! Autogenerated weights for pallet_automation_price +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2024-01-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `actions-runner-1`, CPU: `Intel(R) Xeon(R) E-2388G CPU @ 3.20GHz` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("turing-dev"), DB CACHE: 1024 + +// Executed Command: +// ./oak-collator +// benchmark +// pallet +// --header +// ./.maintain/HEADER-GPL3 +// --chain +// turing-dev +// --execution +// wasm +// --wasm-execution +// compiled +// --pallet +// pallet_automation_price +// --extrinsic +// * +// --repeat +// 20 +// --steps +// 50 +// --output +// ./automation_price-raw-weights.rs +// --template +// ./.maintain/frame-weight-template.hbs + +// Summary: +//:initialize_asset_extrinsic 20_554_305,3541 +//:asset_price_update_extrinsic 7_797_323,1493 +//:schedule_xcmp_task_extrinsic 12_384_000,1493 +//:cancel_task_extrinsic 9_310_000,3579 +//:run_xcmp_task 42_789_000,3946 +//:remove_task 21_707_000,3579 +//:emit_event 5_274_000,0 + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for pallet_automation_price. +pub trait WeightInfo { + fn initialize_asset_extrinsic(v: u32, ) -> Weight; + fn asset_price_update_extrinsic(v: u32, ) -> Weight; + fn schedule_xcmp_task_extrinsic() -> Weight; + fn cancel_task_extrinsic() -> Weight; + fn run_xcmp_task() -> Weight; + fn remove_task() -> Weight; + fn emit_event() -> Weight; +} + +/// Weights for pallet_automation_price using the Substrate node and recommended hardware. +pub struct SubstrateWeight<T>(PhantomData<T>); +impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { + /// Storage: AutomationPrice AssetRegistry (r:1 w:1) + /// Proof Skipped: AutomationPrice AssetRegistry (max_values: None, max_size: None, mode: Measured) + /// The range of component `v` is `[1, 5]`.
+ fn initialize_asset_extrinsic(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `3541` + // Minimum execution time: 19_966_000 picoseconds. + Weight::from_parts(20_554_305, 3541) + // Standard Error: 4_362 + .saturating_add(Weight::from_parts(23_140, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: Timestamp Now (r:1 w:0) + /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// The range of component `v` is `[1, 100]`. + fn asset_price_update_extrinsic(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `1493` + // Minimum execution time: 7_412_000 picoseconds. + Weight::from_parts(7_797_323, 1493) + // Standard Error: 196 + .saturating_add(Weight::from_parts(165_275, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// Storage: Timestamp Now (r:1 w:0) + /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + fn schedule_xcmp_task_extrinsic() -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `1493` + // Minimum execution time: 12_253_000 picoseconds. + Weight::from_parts(12_384_000, 1493) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// Storage: AutomationPrice Tasks (r:1 w:0) + /// Proof Skipped: AutomationPrice Tasks (max_values: None, max_size: None, mode: Measured) + fn cancel_task_extrinsic() -> Weight { + // Proof Size summary in bytes: + // Measured: `114` + // Estimated: `3579` + // Minimum execution time: 9_118_000 picoseconds. + Weight::from_parts(9_310_000, 3579) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// Storage: ParachainInfo ParachainId (r:1 w:0) + /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: UnknownTokens ConcreteFungibleBalances (r:1 w:0) + /// Proof Skipped: UnknownTokens ConcreteFungibleBalances (max_values: None, max_size: None, mode: Measured) + /// Storage: AssetRegistry LocationToAssetId (r:1 w:0) + /// Proof Skipped: AssetRegistry LocationToAssetId (max_values: None, max_size: None, mode: Measured) + fn run_xcmp_task() -> Weight { + // Proof Size summary in bytes: + // Measured: `481` + // Estimated: `3946` + // Minimum execution time: 36_498_000 picoseconds. + Weight::from_parts(42_789_000, 3946) + .saturating_add(T::DbWeight::get().reads(3_u64)) + } + /// Storage: AutomationPrice SortedTasksIndex (r:1 w:0) + /// Proof Skipped: AutomationPrice SortedTasksIndex (max_values: None, max_size: None, mode: Measured) + /// Storage: AutomationPrice SortedTasksByExpiration (r:1 w:1) + /// Proof Skipped: AutomationPrice SortedTasksByExpiration (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: AutomationPrice TaskStats (r:1 w:0) + /// Proof Skipped: AutomationPrice TaskStats (max_values: None, max_size: None, mode: Measured) + /// Storage: AutomationPrice AccountStats (r:1 w:0) + /// Proof Skipped: AutomationPrice AccountStats (max_values: None, max_size: None, mode: Measured) + /// Storage: AutomationPrice Tasks (r:0 w:1) + /// Proof Skipped: AutomationPrice Tasks (max_values: None, max_size: None, mode: Measured) + fn remove_task() -> Weight { + // Proof Size summary in bytes: + // Measured: `114` + // Estimated: `3579` + // Minimum execution time: 21_329_000 picoseconds. 
+ Weight::from_parts(21_707_000, 3579) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + fn emit_event() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 5_119_000 picoseconds. + Weight::from_parts(5_274_000, 0) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + /// Storage: AutomationPrice AssetRegistry (r:1 w:1) + /// Proof Skipped: AutomationPrice AssetRegistry (max_values: None, max_size: None, mode: Measured) + /// The range of component `v` is `[1, 5]`. + fn initialize_asset_extrinsic(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `3541` + // Minimum execution time: 19_966_000 picoseconds. + Weight::from_parts(20_554_305, 3541) + // Standard Error: 4_362 + .saturating_add(Weight::from_parts(23_140, 0).saturating_mul(v.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: Timestamp Now (r:1 w:0) + /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// The range of component `v` is `[1, 100]`. + fn asset_price_update_extrinsic(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `1493` + // Minimum execution time: 7_412_000 picoseconds. + Weight::from_parts(7_797_323, 1493) + // Standard Error: 196 + .saturating_add(Weight::from_parts(165_275, 0).saturating_mul(v.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } + /// Storage: Timestamp Now (r:1 w:0) + /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + fn schedule_xcmp_task_extrinsic() -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `1493` + // Minimum execution time: 12_253_000 picoseconds. + Weight::from_parts(12_384_000, 1493) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } + /// Storage: AutomationPrice Tasks (r:1 w:0) + /// Proof Skipped: AutomationPrice Tasks (max_values: None, max_size: None, mode: Measured) + fn cancel_task_extrinsic() -> Weight { + // Proof Size summary in bytes: + // Measured: `114` + // Estimated: `3579` + // Minimum execution time: 9_118_000 picoseconds. + Weight::from_parts(9_310_000, 3579) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } + /// Storage: ParachainInfo ParachainId (r:1 w:0) + /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: UnknownTokens ConcreteFungibleBalances (r:1 w:0) + /// Proof Skipped: UnknownTokens ConcreteFungibleBalances (max_values: None, max_size: None, mode: Measured) + /// Storage: AssetRegistry LocationToAssetId (r:1 w:0) + /// Proof Skipped: AssetRegistry LocationToAssetId (max_values: None, max_size: None, mode: Measured) + fn run_xcmp_task() -> Weight { + // Proof Size summary in bytes: + // Measured: `481` + // Estimated: `3946` + // Minimum execution time: 36_498_000 picoseconds. 
+ Weight::from_parts(42_789_000, 3946) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + } + /// Storage: AutomationPrice SortedTasksIndex (r:1 w:0) + /// Proof Skipped: AutomationPrice SortedTasksIndex (max_values: None, max_size: None, mode: Measured) + /// Storage: AutomationPrice SortedTasksByExpiration (r:1 w:1) + /// Proof Skipped: AutomationPrice SortedTasksByExpiration (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: AutomationPrice TaskStats (r:1 w:0) + /// Proof Skipped: AutomationPrice TaskStats (max_values: None, max_size: None, mode: Measured) + /// Storage: AutomationPrice AccountStats (r:1 w:0) + /// Proof Skipped: AutomationPrice AccountStats (max_values: None, max_size: None, mode: Measured) + /// Storage: AutomationPrice Tasks (r:0 w:1) + /// Proof Skipped: AutomationPrice Tasks (max_values: None, max_size: None, mode: Measured) + fn remove_task() -> Weight { + // Proof Size summary in bytes: + // Measured: `114` + // Estimated: `3579` + // Minimum execution time: 21_329_000 picoseconds. + Weight::from_parts(21_707_000, 3579) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + fn emit_event() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 5_119_000 picoseconds. + Weight::from_parts(5_274_000, 0) + } +} diff --git a/pallets/automation-time/Cargo.toml b/pallets/automation-time/Cargo.toml new file mode 100644 index 000000000..c59c41f83 --- /dev/null +++ b/pallets/automation-time/Cargo.toml @@ -0,0 +1,132 @@ +[package] +name = "pallet-automation-time" +authors = [ "Ava Protocol Team" ] +description = "Pallet for scheduling and running tasks in the future." +edition = "2021" +homepage = "https://avaprotocol.org" +license = "GPL-3.0" +readme = "README.md" +repository = "https://github.com/AvaProtocol/tanssi-integration" +version = "1.0.0" + +[package.metadata.docs.rs] +targets = [ "x86_64-unknown-linux-gnu" ] + +[dependencies] +hex = { workspace = true, features = [ "alloc" ] } +log = { workspace = true } +parity-scale-codec = { workspace = true, default-features = false, features = [ "derive" ] } +scale-info = { workspace = true, features = [ "derive" ] } + +# Polkadot +polkadot-parachain-primitives = { workspace = true } +staging-xcm = { workspace = true } + +# Cumulus dependencies +cumulus-pallet-xcm = { workspace = true } +cumulus-primitives-core = { workspace = true } + +# Substrate Dependencies +## Substrate Primitive Dependencies +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } + +## Substrate FRAME Dependencies +frame-benchmarking = { workspace = true, optional = true } +frame-support = { workspace = true } +frame-system = { workspace = true } + +## Substrate Pallet Dependencies +pallet-balances = { workspace = true } +pallet-timestamp = { workspace = true } + +## Polkadot deps +staging-xcm-builder = { workspace = true } + +## ORML +orml-currencies = { workspace = true } +orml-traits = { workspace = true } + +## Local +ava-protocol-primitives = { workspace = true } +pallet-xcmp-handler = { workspace = true } + + +[dev-dependencies] +rand = { workspace = true } +serde = { workspace = true } +sp-core = { workspace = true } + +pallet-balances = { workspace = true } +pallet-xcm = { workspace = true } +staging-xcm-executor = { workspace = true } + +# Cumulus dependencies +parachain-info = { workspace = true } + +orml-currencies = { workspace = true } +orml-tokens = { workspace
= true } + +[features] +default = [ "std" ] +std = [ + "ava-protocol-primitives/std", + "cumulus-pallet-xcm/std", + "cumulus-primitives-core/std", + "frame-benchmarking/std", + "frame-support/std", + "frame-system/std", + "hex/std", + "log/std", + "orml-currencies/std", + "orml-tokens/std", + "orml-traits/std", + "pallet-balances/std", + "pallet-timestamp/std", + "pallet-xcm/std", + "pallet-xcmp-handler/std", + "parachain-info/std", + "parity-scale-codec/std", + "polkadot-parachain-primitives/std", + "rand/std", + "scale-info/std", + "serde/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", + "staging-xcm-builder/std", + "staging-xcm-executor/std", + "staging-xcm/std", +] +dev-queue = [] +runtime-benchmarks = [ + "cumulus-primitives-core/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "orml-tokens/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "pallet-xcm/runtime-benchmarks", + "pallet-xcmp-handler/runtime-benchmarks", + "polkadot-parachain-primitives/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "staging-xcm-builder/runtime-benchmarks", + "staging-xcm-executor/runtime-benchmarks", +] +try-runtime = [ + "cumulus-pallet-xcm/try-runtime", + "frame-support/try-runtime", + "frame-system/try-runtime", + "orml-currencies/try-runtime", + "orml-tokens/try-runtime", + "pallet-balances/try-runtime", + "pallet-timestamp/try-runtime", + "pallet-xcm/try-runtime", + "pallet-xcmp-handler/try-runtime", + "parachain-info/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/pallets/automation-time/src/benchmarking.rs b/pallets/automation-time/src/benchmarking.rs new file mode 100644 index 000000000..a31e3b116 --- /dev/null +++ b/pallets/automation-time/src/benchmarking.rs @@ -0,0 +1,473 @@ +// This file is part of Ava Protocol. + +// Copyright (C) 2022 Ava Protocol +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
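Several of the benchmarks in the file below first fill a time slot to capacity. That capacity is derived by dividing the configured maximum weight per slot by the ref-time of one task execution, capped by the maximum task count per slot; a plain-Rust sketch of that computation, using made-up constants in place of the runtime Config values:

    // Illustrative constants only; the real values come from T::MaxWeightPerSlot,
    // the generated WeightInfo, and T::MaxTasksPerSlot.
    const MAX_WEIGHT_PER_SLOT: u128 = 70_000_000_000; // assumed config value
    const RUN_XCMP_TASK_REF_TIME: u128 = 42_789_000; // ref-time from the weights file above
    const MAX_TASKS_PER_SLOT: u32 = 256; // assumed config value

    fn max_tasks_per_slot() -> u32 {
        // Weight-derived capacity, saturating instead of panicking on overflow.
        let by_weight = u32::try_from(MAX_WEIGHT_PER_SLOT / RUN_XCMP_TASK_REF_TIME)
            .unwrap_or(u32::MAX);
        by_weight.min(MAX_TASKS_PER_SLOT)
    }

    fn main() {
        // 70_000_000_000 / 42_789_000 is roughly 1635 tasks by weight, capped at 256.
        assert_eq!(max_tasks_per_slot(), 256);
    }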
+ +#![cfg(feature = "runtime-benchmarks")] + +//mod mock; +use super::*; +use frame_benchmarking::{account, benchmarks}; +use frame_system::RawOrigin; +use pallet_timestamp::Pallet as Timestamp; +use polkadot_parachain_primitives::primitives::Sibling; +use sp_runtime::traits::{AccountIdConversion, Saturating}; +use sp_std::cmp; +use xcm::latest::prelude::*; + +use crate::{Config, MissedTaskV2Of, Pallet as AutomationTime, TaskOf}; + +const SEED: u32 = 0; +// existential deposit multiplier +const ED_MULTIPLIER: u32 = 1_000; +// ensure enough funds to execute tasks +const DEPOSIT_MULTIPLIER: u32 = 100_000_000; + +fn assert_last_event(generic_event: ::RuntimeEvent) { + frame_system::Pallet::::assert_last_event(generic_event.into()); +} + +fn schedule_notify_tasks(owner: T::AccountId, times: Vec, count: u32) -> Vec { + let transfer_amount = T::Currency::minimum_balance().saturating_mul(ED_MULTIPLIER.into()); + T::Currency::deposit_creating( + &owner, + transfer_amount.saturating_mul(DEPOSIT_MULTIPLIER.into()), + ); + let time_moment: u32 = times[0].saturated_into(); + Timestamp::::set_timestamp(time_moment.into()); + let mut task_id = vec![0u8]; + + for _ in 0..count { + task_id = increment_task_id(task_id); + let task = TaskOf::::create_event_task::( + owner.clone(), + task_id.clone(), + times.clone(), + vec![4, 5, 6], + vec![], + ) + .unwrap(); + let task_id = AutomationTime::::schedule_task(&task).unwrap(); + >::insert(owner.clone(), task_id, task); + } + + task_id +} + +fn schedule_xcmp_tasks( + owner: T::AccountId, + schedule_as: Option, + times: Vec, + count: u32, +) -> Vec { + let transfer_amount = T::Currency::minimum_balance().saturating_mul(ED_MULTIPLIER.into()); + T::Currency::deposit_creating( + &owner, + transfer_amount.saturating_mul(DEPOSIT_MULTIPLIER.into()), + ); + let para_id: u32 = 2001; + let time_moment: u32 = times[0].saturated_into(); + Timestamp::::set_timestamp(time_moment.into()); + let mut task_id = vec![0u8]; + + for _ in 0..count { + task_id = increment_task_id(task_id); + let task = TaskOf::::create_xcmp_task::( + owner.clone(), + task_id.clone(), + times.clone(), + MultiLocation::new(1, X1(Parachain(para_id))), + MultiLocation::default(), + AssetPayment { + asset_location: MultiLocation::new(1, Parachain(para_id)).into(), + amount: 0, + }, + vec![4, 5, 6], + Weight::from_parts(5_000, 0), + Weight::from_parts(10_000, 0), + schedule_as.clone(), + InstructionSequence::PayThroughSovereignAccount, + vec![], + ) + .unwrap(); + let task_id = AutomationTime::::schedule_task(&task).unwrap(); + >::insert(owner.clone(), task_id, task); + } + + task_id +} + +fn increment_task_id(mut task_id: Vec) -> Vec { + let last = task_id.last_mut().unwrap(); + if let Some(next) = last.checked_add(1) { + *last = next; + } else { + task_id.push(0u8); + } + task_id +} + +benchmarks! 
{ + schedule_xcmp_task_full { + let v in 1..T::MaxExecutionTimes::get(); + + let mut max_tasks_per_slot: u32 = ( + T::MaxWeightPerSlot::get() / ::WeightInfo::run_xcmp_task().ref_time() as u128 + ).try_into().unwrap(); + max_tasks_per_slot = cmp::min(max_tasks_per_slot, T::MaxTasksPerSlot::get()); + + let caller: T::AccountId = account("caller", 0, SEED); + let time: u64 = 7200; + let currency_id: T::CurrencyId = 1u32.into(); + let para_id: u32 = 2110; + let call = vec![4,5,6]; + + let mut times: Vec = vec![]; + for i in 1..=v { + let hour: u64 = (3600 * i).try_into().unwrap(); + times.push(hour); + } + let schedule = ScheduleParam::Fixed { execution_times: times.clone() }; + + let destination = MultiLocation::new(1, X1(Parachain(para_id))); + + let schedule_fee = T::CurrencyIdConvert::convert(currency_id).expect("IncoveribleCurrencyId"); + + let fee = AssetPayment { asset_location: MultiLocation::new(0, Here).into(), amount: 100u128 }; + + let task_id = schedule_xcmp_tasks::(caller.clone(), None, times, max_tasks_per_slot - 1); + let foreign_currency_amount = T::MultiCurrency::minimum_balance(currency_id.into()) + .saturating_add(1u32.into()) + .saturating_mul(ED_MULTIPLIER.into()) + .saturating_mul(DEPOSIT_MULTIPLIER.into()); + let _ = T::MultiCurrency::deposit(currency_id.into(), &caller, foreign_currency_amount); + }: schedule_xcmp_task(RawOrigin::Signed(caller), schedule, Box::new(destination.into()), Box::new(schedule_fee.into()), Box::new(fee), call, Weight::from_parts(1_000, 0), Weight::from_parts(2_000, 0), InstructionSequence::PayThroughSovereignAccount, None) + + schedule_auto_compound_delegated_stake_task_full { + let task_weight = ::WeightInfo::run_auto_compound_delegated_stake_task().ref_time(); + let max_tasks_per_slot_by_weight: u32 = (T::MaxWeightPerSlot::get() / task_weight as u128).try_into().unwrap(); + let max_tasks_per_slot = max_tasks_per_slot_by_weight.min(T::MaxTasksPerSlot::get()); + + let delegator: T::AccountId = account("delegator", 0, SEED); + let collator: T::AccountId = account("collator", 0, max_tasks_per_slot); + let account_minimum = T::Currency::minimum_balance(); + let starting_balance = account_minimum.saturating_mul(ED_MULTIPLIER.into()) + .saturating_add(task_weight.saturating_mul(T::ExecutionWeightFee::get().saturated_into()).saturated_into()); + let time: u64 = 3600; + + T::Currency::deposit_creating(&delegator, starting_balance.saturated_into()); + schedule_auto_compound_delegated_stake_tasks::(delegator.clone(), time, max_tasks_per_slot - 1); + }: schedule_auto_compound_delegated_stake_task(RawOrigin::Signed(delegator), time, 3600 , collator, account_minimum) + + schedule_dynamic_dispatch_task { + let v in 1 .. 
T::MaxExecutionTimes::get(); + + Timestamp::<T>::set_timestamp(1u32.into()); // Set to non-zero default for testing + + let times = (1..=v).map(|i| { + 3600 * i as UnixTime + }).collect(); + let schedule = ScheduleParam::Fixed { execution_times: times }; + + let caller: T::AccountId = account("caller", 0, SEED); + let call: <T as Config>::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + + let account_min = T::Currency::minimum_balance().saturating_mul(ED_MULTIPLIER.into()); + T::Currency::deposit_creating(&caller, account_min.saturating_mul(DEPOSIT_MULTIPLIER.into())); + + let initial_event_count = frame_system::Pallet::<T>::event_count() as u8; + }: schedule_dynamic_dispatch_task(RawOrigin::Signed(caller.clone()), schedule, Box::new(call.clone())) + verify { + { + //panic!("events count: {:?} final {:?} data:{:?}", initial_event_count, frame_system::Pallet::<T>::event_count(), frame_system::Pallet::<T>::events()); + // The task is scheduled in the first block as the first extrinsic, but the initial event count can differ between the unit-test and benchmark paths, so we read it from the initial state. + assert_last_event::<T>(Event::TaskScheduled { who: caller, schedule_as: None, task_id: format!("1-0-{:?}", initial_event_count).as_bytes().to_vec(), encoded_call: Some(call.encode()) }.into()) + } + } + + schedule_dynamic_dispatch_task_full { + let v in 1 .. T::MaxExecutionTimes::get(); + + Timestamp::<T>::set_timestamp(1u32.into()); // Set to non-zero default for testing + + let times: Vec<UnixTime> = (1..=v).map(|i| { + 3600 * i as UnixTime + }).collect(); + let schedule = ScheduleParam::Fixed { execution_times: times.clone() }; + + let caller: T::AccountId = account("caller", 0, SEED); + let call: <T as Config>::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + + let account_min = T::Currency::minimum_balance().saturating_mul(ED_MULTIPLIER.into()); + T::Currency::deposit_creating(&caller, account_min.saturating_mul(DEPOSIT_MULTIPLIER.into())); + + // Almost fill up all time slots + schedule_notify_tasks::<T>(caller.clone(), times, T::MaxTasksPerSlot::get() - 1); + let initial_event_count = frame_system::Pallet::<T>::event_count() as u8; + }: schedule_dynamic_dispatch_task(RawOrigin::Signed(caller.clone()), schedule, Box::new(call.clone())) + verify { + // The task is scheduled in the first block as the first extrinsic, but the initial event count can differ between the unit-test and benchmark paths, so we read it from the initial state. + assert_last_event::<T>(Event::TaskScheduled { who: caller, schedule_as: None, task_id: format!("1-0-{:?}", initial_event_count).as_bytes().to_vec(), encoded_call: Some(call.encode()) }.into()) + } + + cancel_scheduled_task_full { + let caller: T::AccountId = account("caller", 0, SEED); + let mut times: Vec<UnixTime> = vec![]; + + for i in 0..T::MaxExecutionTimes::get() { + let hour: u64 = (3600 * (i + 1)).try_into().unwrap(); + times.push(hour); + } + + let task_id = schedule_notify_tasks::<T>(caller.clone(), times, T::MaxTasksPerSlot::get()); + }: cancel_task(RawOrigin::Signed(caller), task_id) + + force_cancel_scheduled_task { + let caller: T::AccountId = account("caller", 0, SEED); + let time: u64 = 10800; + + let task_id = schedule_notify_tasks::<T>(caller.clone(), vec![time], 1); + }: force_cancel_task(RawOrigin::Root, caller, task_id) + + force_cancel_scheduled_task_full { + let caller: T::AccountId = account("caller", 0, SEED); + let mut times: Vec<UnixTime> = vec![]; + + for i in 0..T::MaxExecutionTimes::get() { + let hour: u64 = (3600 * (i + 1)).try_into().unwrap(); +
times.push(hour); + } + + let task_id = schedule_notify_tasks::(caller.clone(), times, T::MaxTasksPerSlot::get()); + }: force_cancel_task(RawOrigin::Root, caller, task_id) + + cancel_task_with_schedule_as_full { + let caller: T::AccountId = account("caller", 0, SEED); + let schedule_as: T::AccountId = account("schedule_as", 0, SEED); + let time: u64 = 10800; + let para_id: u32 = 2001; + let mut times: Vec = vec![]; + + // Fill up all time slots + for i in 0..T::MaxExecutionTimes::get() { + let hour: u64 = (3600 * (i + 1)).try_into().unwrap(); + times.push(hour); + } + + let local_para_id: u32 = 2114; + let destination = MultiLocation::new(1, X1(Parachain(para_id))); + let local_sovereign_account: T::AccountId = Sibling::from(local_para_id).into_account_truncating(); + T::Currency::deposit_creating( + &local_sovereign_account, + T::Currency::minimum_balance().saturating_mul(DEPOSIT_MULTIPLIER.into()), + ); + + let fee = AssetPayment { asset_location: MultiLocation::new(1, Parachain(para_id)).into(), amount: 1000u128 }; + let task_id = schedule_xcmp_tasks::(caller.clone(), Some(schedule_as.clone()), times, 1); + }: cancel_task_with_schedule_as(RawOrigin::Signed(schedule_as), caller, task_id) + + run_xcmp_task { + let caller: T::AccountId = account("caller", 0, SEED); + let time: u64 = 10800; + let para_id: u32 = 2001; + let call = vec![4,5,6]; + + let local_para_id: u32 = 2114; + let destination = MultiLocation::new(1, X1(Parachain(para_id))); + let local_sovereign_account: T::AccountId = Sibling::from(local_para_id).into_account_truncating(); + T::Currency::deposit_creating( + &local_sovereign_account, + T::Currency::minimum_balance().saturating_mul(DEPOSIT_MULTIPLIER.into()), + ); + + let fee = AssetPayment { asset_location: MultiLocation::new(1, Parachain(para_id)).into(), amount: 1000u128 }; + + let task_id = schedule_xcmp_tasks::(caller.clone(), None, vec![time], 1); + }: { AutomationTime::::run_xcmp_task(destination, caller, fee, call, Weight::from_parts(100_000, 0), Weight::from_parts(200_000, 0), InstructionSequence::PayThroughSovereignAccount) } + + run_auto_compound_delegated_stake_task { + let delegator: T::AccountId = account("delegator", 0, SEED); + let collator: T::AccountId = account("collator", 0, SEED); + let account_minimum = T::Currency::minimum_balance().saturating_mul(ED_MULTIPLIER.into()); + + T::Currency::deposit_creating(&delegator, (10_000_000_000_000_000_u128).saturated_into()); + T::Currency::deposit_creating(&collator, (100_000_000_000_000_000_u128).saturated_into()); + T::DelegatorActions::setup_delegator(&collator, &delegator)?; + + let (task_id, task) = schedule_auto_compound_delegated_stake_tasks::(delegator.clone(), 3600, 1).pop().unwrap(); + }: { AutomationTime::::run_auto_compound_delegated_stake_task(delegator, collator, account_minimum, &task) } + + run_dynamic_dispatch_action { + let caller: T::AccountId = account("caller", 0, SEED); + let task_id = vec![49, 45, 48, 45, 52]; + let call: ::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + let encoded_call = call.encode(); + }: { + let (_, error) = AutomationTime::::run_dynamic_dispatch_action(caller.clone(), encoded_call); + assert_eq!(error, None); + } + + run_dynamic_dispatch_action_fail_decode { + let caller: T::AccountId = account("caller", 0, SEED); + let task_id = vec![49, 45, 48, 45, 52]; + + let bad_encoded_call: Vec = vec![1]; + }: { + let (_, error) = AutomationTime::::run_dynamic_dispatch_action(caller.clone(), bad_encoded_call); + assert_eq!(error, 
Some(DispatchError::from(Error::::CallCannotBeDecoded))); + } + + /* + * This section is to test run_missed_tasks. + * run_missed_tasks_many_found: measuring many existing tasks for linear progression + * run_missed_tasks_many_missing: measuring many non existing tasks for linear progression + */ + run_missed_tasks_many_found { + let v in 0 .. 1; + + Timestamp::::set_timestamp(1u32.into()); // Set to non-zero default for testing + + let weight_left = Weight::from_parts(50_000_000_000, 0); + let mut missed_tasks = vec![]; + let caller: T::AccountId = account("caller", 0, SEED); + let time: u32 = 10800; + + for i in 0..v { + let task_id: Vec = vec![i.saturated_into::()]; + let task = TaskOf::::create_event_task::(caller.clone(), task_id.clone(), vec![time.into()], vec![4, 5, 6], vec![]).unwrap(); + AutomationTime::::schedule_task(&task).unwrap(); + let missed_task = MissedTaskV2Of::::new(caller.clone(), task_id.clone(), time.into()); + >::insert(caller.clone(), task_id.clone(), task); + missed_tasks.push(missed_task) + } + }: { AutomationTime::::run_missed_tasks(missed_tasks, weight_left) } + + run_missed_tasks_many_missing { + let v in 0 .. 1; + + let caller: T::AccountId = account("caller", 0, SEED); + let time: u64 = 10800; + let mut missed_tasks = vec![]; + let weight_left = Weight::from_parts(500_000_000_000, 0); + + for i in 0..v { + let task_id: Vec = vec![i.saturated_into::()]; + let missed_task = MissedTaskV2Of::::new(caller.clone(), task_id.clone(), time); + missed_tasks.push(missed_task) + } + }: { AutomationTime::::run_missed_tasks(missed_tasks, weight_left) } + + /* + * This section is to test run_tasks. + * run_tasks_many_found: tests many existing tasks for linear progression + * run_tasks_many_missing: tests many non existing tasks for linear progression + */ + run_tasks_many_found { + let v in 0 .. 1; + + Timestamp::::set_timestamp(1u32.into()); // Set to non-zero default for testing + + let weight_left = Weight::from_parts(500_000_000_000, 0); + let mut task_ids = vec![]; + let caller: T::AccountId = account("caller", 0, SEED); + let execution_time = 10800; + + for i in 0..v { + let task_id: Vec = vec![i.saturated_into::()]; + let task = TaskOf::::create_event_task::(caller.clone(), task_id.clone(), vec![execution_time], vec![65, 65.saturating_add(i as u8)], vec![]).unwrap(); + let task_id = AutomationTime::::schedule_task(&task).unwrap(); + >::insert(caller.clone(), task_id.clone(), task); + task_ids.push((caller.clone(), task_id.clone())) + } + }: { AutomationTime::::run_tasks(task_ids, weight_left) } + + run_tasks_many_missing { + let v in 0 .. 1; + let caller: T::AccountId = account("caller", 0, SEED); + let time: u64 = 10800; + let mut task_ids = vec![]; + let weight_left = Weight::from_parts(500_000_000_000, 0); + + for i in 0..v { + let task_id: Vec = vec![i.saturated_into::()]; + task_ids.push((caller.clone(), task_id)) + } + }: { AutomationTime::::run_tasks(task_ids, weight_left) } + + /* + * This section is to test update_task_queue. This only tests for 1 single missed slot. + * update_task_queue_overhead: gets overhead of fn without any items + * append_to_missed_tasks: measures appending to missed tasks + * update_task_queue_max_current_and_next: measures fn overhead with both current and future tasks + */ + + update_task_queue_overhead { + let weight_left = Weight::from_parts(500_000_000_000, 0); + }: { AutomationTime::::update_task_queue(weight_left) } + + append_to_missed_tasks { + let v in 0 .. 
2; + + Timestamp::<T>::set_timestamp(1u32.into()); // Set to non-zero default for testing + + let weight_left = Weight::from_parts(500_000_000_000, 0); + let caller: T::AccountId = account("callerName", 0, SEED); + let last_time_slot: u64 = 3600; + let time = last_time_slot; + let time_change: u64 = (v * 3600).into(); + let current_time = last_time_slot + time_change; + + for i in 0..v { + for j in 0..1 { + let time = time.saturating_add(3600); + let task_id: Vec<u8> = vec![i.saturated_into::<u8>(), j.saturated_into::<u8>()]; + let task = TaskOf::<T>::create_event_task::<T>(caller.clone(), task_id.clone(), vec![time], vec![4, 5, 6], vec![]).unwrap(); + AutomationTime::<T>::schedule_task(&task).unwrap(); + <AccountTasks<T>>::insert(caller.clone(), task_id, task); + } + } + }: { AutomationTime::<T>::append_to_missed_tasks(current_time, last_time_slot, weight_left) } + + update_scheduled_task_queue { + let caller: T::AccountId = account("callerName", 0, SEED); + let last_time_slot: u64 = 7200; + let current_time = 10800; + let mut task_id = vec![0u8]; + Timestamp::<T>::set_timestamp(current_time.saturated_into::<u32>().into()); + + for i in 0..T::MaxTasksPerSlot::get() { + task_id = increment_task_id(task_id); + let task = TaskOf::<T>::create_event_task::<T>(caller.clone(), task_id.clone(), vec![current_time], vec![4, 5, 6], vec![]).unwrap(); + AutomationTime::<T>::schedule_task(&task).unwrap(); + <AccountTasks<T>>::insert(caller.clone(), task_id.clone(), task); + } + }: { AutomationTime::<T>::update_scheduled_task_queue(current_time, last_time_slot) } + + + shift_missed_tasks { + let caller: T::AccountId = account("callerName", 0, SEED); + let last_time_slot: u64 = 7200; + let new_time_slot: u64 = 14400; + let diff = 1; + + schedule_notify_tasks::<T>(caller, vec![new_time_slot], T::MaxTasksPerSlot::get()); + }: { AutomationTime::<T>::shift_missed_tasks(last_time_slot, diff) } + + impl_benchmark_test_suite!( + AutomationTime, + crate::mock::new_test_ext(crate::tests::START_BLOCK_TIME), + crate::mock::Test + ); +} diff --git a/pallets/automation-time/src/fees.rs b/pallets/automation-time/src/fees.rs new file mode 100644 index 000000000..e8cd18025 --- /dev/null +++ b/pallets/automation-time/src/fees.rs @@ -0,0 +1,511 @@ +// This file is part of Ava Protocol. + +// Copyright (C) 2022 Ava Protocol +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits and default implementations for paying execution fees.
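The fee handler that follows implements a check-then-pay flow: verify the fee can be paid, run the caller's prerequisite closure, and only then withdraw, so a failing prerequisite never costs the user fees (the `does_not_charge_fees_when_prereq_errors` test below exercises exactly this). A minimal sketch of that pattern with placeholder types, not the pallet's own:

    // Sketch only: balances and errors are simplified stand-ins.
    fn pay_checked_fees_for<R>(
        balance: &mut u128,
        fee: u128,
        prereq: impl FnOnce() -> Result<R, &'static str>,
    ) -> Result<R, &'static str> {
        if *balance < fee {
            return Err("InsufficientBalance"); // mirrors Error::<T>::InsufficientBalance
        }
        let outcome = prereq()?; // fees are never charged if this errors
        *balance -= fee; // withdraw only after the prerequisite succeeds
        Ok(outcome)
    }

    fn main() {
        let mut balance = 100;
        assert!(pay_checked_fees_for(&mut balance, 150, || Ok(())).is_err());
        assert_eq!(balance, 100); // untouched on failure
        assert_eq!(pay_checked_fees_for(&mut balance, 40, || Ok("called")), Ok("called"));
        assert_eq!(balance, 60);
    }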
+use crate::{AccountOf, Action, ActionOf, Config, Error, MultiBalanceOf, Pallet}; + +use frame_support::traits::Get; +use orml_traits::MultiCurrency; +use pallet_xcmp_handler::{InstructionSequence, XcmpTransactor}; +use sp_runtime::{ + traits::{CheckedSub, Convert, Saturating, Zero}, + DispatchError, DispatchResult, SaturatedConversion, + TokenError::BelowMinimum, +}; +use sp_std::marker::PhantomData; +use staging_xcm::latest::prelude::*; +use staging_xcm_builder::TakeRevenue; + +/// Handle execution fee payments in the context of automation actions +pub trait HandleFees<T: Config> { + fn pay_checked_fees_for<R, F: FnOnce() -> Result<R, DispatchError>>( + owner: &AccountOf<T>, + action: &ActionOf<T>, + executions: u32, + prereq: F, + ) -> Result<R, DispatchError>; +} + +#[derive(Clone)] +pub struct FeePayment<T: Config> { + pub asset_location: Location, + pub amount: MultiBalanceOf<T>, + pub is_local: bool, +} + +pub struct FeeHandler<T: Config, TR> { + owner: T::AccountId, + pub schedule_fee: FeePayment<T>, + pub execution_fee: Option<FeePayment<T>>, + _phantom_data: PhantomData<TR>, +} + +impl<T, TR> HandleFees<T> for FeeHandler<T, TR> +where + T: Config, + TR: TakeRevenue, +{ + fn pay_checked_fees_for<R, F: FnOnce() -> Result<R, DispatchError>>( + owner: &AccountOf<T>, + action: &ActionOf<T>, + executions: u32, + prereq: F, + ) -> Result<R, DispatchError> { + let fee_handler = Self::new(owner, action, executions)?; + fee_handler + .can_pay_fee() + .map_err(|_| Error::<T>::InsufficientBalance)?; + let outcome = prereq()?; + fee_handler.pay_fees()?; + Ok(outcome) + } +} + +impl<T, TR> FeeHandler<T, TR> +where + T: Config, + TR: TakeRevenue, +{ + fn ensure_can_withdraw( + &self, + asset_location: Location, + amount: MultiBalanceOf<T>, + ) -> Result<(), DispatchError> { + if amount.is_zero() { + return Ok(()); + } + + let currency_id = T::CurrencyIdConvert::convert(asset_location) + .ok_or("InconvertibleMultilocation")? + .into(); + let free_balance = T::MultiCurrency::free_balance(currency_id, &self.owner); + let min_balance = T::MultiCurrency::minimum_balance(currency_id); + + free_balance + .checked_sub(&amount) + .and_then(|balance_minus_fee| balance_minus_fee.checked_sub(&min_balance)) + .ok_or(DispatchError::Token(BelowMinimum))?; + + T::MultiCurrency::ensure_can_withdraw(currency_id, &self.owner, amount)?; + + Ok(()) + } + + /// Ensure the fee can be paid. + fn can_pay_fee(&self) -> Result<(), DispatchError> { + match &self.execution_fee { + Some(exec_fee) if exec_fee.is_local => { + // If the locations of schedule_fee and execution_fee are equal, + // we need to add the fees to check whether they are sufficient, + // otherwise check them separately. + let exec_fee_location = exec_fee + .asset_location + .clone() + .reanchored(&T::SelfLocation::get(), &T::UniversalLocation::get()) + .map_err(|_| Error::<T>::CannotReanchor)?; + + let schedule_fee_location = self + .schedule_fee + .asset_location + .clone() + .reanchored(&T::SelfLocation::get(), &T::UniversalLocation::get()) + .map_err(|_| Error::<T>::CannotReanchor)?; + + if exec_fee_location == schedule_fee_location { + let fee = self.schedule_fee.amount.saturating_add(exec_fee.amount); + Self::ensure_can_withdraw(self, exec_fee.asset_location.clone(), fee)?; + } else { + Self::ensure_can_withdraw( + self, + self.schedule_fee.asset_location.clone(), + self.schedule_fee.amount, + )?; + Self::ensure_can_withdraw( + self, + exec_fee.asset_location.clone(), + exec_fee.amount, + )?; + } + } + _ => { + Self::ensure_can_withdraw( + self, + self.schedule_fee.asset_location.clone(), + self.schedule_fee.amount, + )?; + } + } + + Ok(()) + } + + /// Withdraw the fee.
+ fn withdraw_fee(&self) -> Result<(), DispatchError> { + log::debug!(target: "FeeHandler", "FeeHandler::withdraw_fee, self.schedule_fee.asset_location: {:?}, self.schedule_fee.amount: {:?}", + self.schedule_fee.asset_location, self.schedule_fee.amount); + // Withdraw schedule fee + // When the expected deduction amount, schedule_fee_amount, is not equal to zero, execute the withdrawal process; + // otherwise, there’s no need to deduct. + if !self.schedule_fee.amount.is_zero() { + let currency_id = + T::CurrencyIdConvert::convert(self.schedule_fee.asset_location.clone()) + .ok_or("InconvertibleMultilocation")?; + + T::MultiCurrency::withdraw(currency_id.into(), &self.owner, self.schedule_fee.amount) + .map_err(|_| DispatchError::Token(BelowMinimum))?; + + TR::take_revenue(Asset { + id: self.schedule_fee.asset_location.clone().into(), + fun: Fungibility::Fungible(self.schedule_fee.amount.saturated_into()), + }); + } + + // Withdraw execution fee + if let Some(execution_fee) = &self.execution_fee { + if execution_fee.is_local { + log::debug!(target: "FeeHandler", "FeeHandler::withdraw_fee, self.execution_fee.asset_location: {:?}, self.execution_fee.amount: {:?}", + execution_fee.asset_location, execution_fee.amount); + let currency_id = + T::CurrencyIdConvert::convert(execution_fee.asset_location.clone()) + .ok_or("InconvertibleMultilocation")?; + + let execution_fee_amount = execution_fee.amount; + // When the expected deduction amount, execution_fee_amount, is not equal to zero, execute the withdrawal process; + // otherwise, there’s no need to deduct. + if !execution_fee_amount.is_zero() { + T::XcmpTransactor::pay_xcm_fee( + currency_id, + self.owner.clone(), + execution_fee_amount.saturated_into(), + )?; + } + } + } + + Ok(()) + } + + /// Builds an instance of the struct + pub fn new( + owner: &AccountOf, + action: &ActionOf, + executions: u32, + ) -> Result { + let schedule_fee_location = action.schedule_fee_location::(); + + let schedule_fee_amount: u128 = + Pallet::::calculate_schedule_fee_amount(action, executions)?.saturated_into(); + + let schedule_fee = FeePayment { + asset_location: schedule_fee_location, + amount: schedule_fee_amount.saturated_into(), + is_local: true, + }; + + let execution_fee = match action.clone() { + Action::XCMP { + execution_fee, + instruction_sequence, + .. + } => { + let location = Location::try_from(execution_fee.asset_location) + .map_err(|()| Error::::BadVersion)?; + let amount = execution_fee + .amount + .saturating_mul(executions.into()) + .saturated_into(); + Some(FeePayment { + asset_location: location, + amount, + is_local: instruction_sequence + == InstructionSequence::PayThroughSovereignAccount, + }) + } + _ => None, + }; + + Ok(Self { + owner: owner.clone(), + schedule_fee, + execution_fee, + _phantom_data: Default::default(), + }) + } + + /// Executes the fee handler + fn pay_fees(self) -> DispatchResult { + // This should never error if can_pay_fee passed. 
+ self.withdraw_fee() + .map_err(|_| Error::::LiquidityRestrictions)?; + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{mock::*, Action, AssetPayment, Weight}; + use frame_support::{assert_err, sp_runtime::AccountId32}; + use frame_system::RawOrigin; + use parity_scale_codec::Encode; + use staging_xcm::latest::prelude::Junctions::*; + + #[test] + fn pay_checked_fees_for_success() { + new_test_ext(0).execute_with(|| { + let alice = AccountId32::new(ALICE); + fund_account(&alice, 900_000_000, 1, Some(0)); + let starting_funds = Balances::free_balance(alice.clone()); + + let call: ::RuntimeCall = + frame_system::Call::remark_with_event { remark: vec![50] }.into(); + let mut has_callback_run = false; + let result = ::FeeHandler::pay_checked_fees_for( + &alice, + &Action::DynamicDispatch { + encoded_call: call.encode(), + }, + 1, + || { + has_callback_run = true; + Ok("called") + }, + ); + assert_eq!(result.expect("success"), "called"); + assert!(has_callback_run); + assert!(starting_funds > Balances::free_balance(alice)) + }) + } + + #[test] + fn call_pay_checked_fees_for_with_normal_flow_and_enough_execution_fee_success() { + new_test_ext(0).execute_with(|| { + let destination = Location::new(1, X1([Parachain(PARA_ID)].into())); + let alice = AccountId32::new(ALICE); + let mut has_callback_run = false; + get_multi_xcmp_funds(alice.clone()); + + let action = Action::XCMP { + destination: destination.clone(), + schedule_fee: NATIVE_LOCATION, + execution_fee: Box::new(AssetPayment { + asset_location: destination.into(), + amount: 10, + }), + encoded_call: vec![3, 4, 5], + encoded_call_weight: Weight::from_parts(100_000, 0), + overall_weight: Weight::from_parts(200_000, 0), + schedule_as: None, + instruction_sequence: InstructionSequence::PayThroughSovereignAccount, + }; + + let result = ::FeeHandler::pay_checked_fees_for( + &alice, + &action, + 1, + || { + has_callback_run = true; + Ok("called") + }, + ); + assert_eq!(result.expect("success"), "called"); + assert!(has_callback_run); + }) + } + + #[test] + fn call_pay_checked_fees_for_with_normal_flow_and_foreign_schedule_fee_success() { + new_test_ext(0).execute_with(|| { + let destination = Location::new(1, X1([Parachain(PARA_ID)].into())); + let alice = AccountId32::new(ALICE); + let mut has_callback_run = false; + let _ = Currencies::update_balance( + RawOrigin::Root.into(), + alice.clone(), + FOREIGN_CURRENCY_ID, + XmpFee::get() as i64, + ); + fund_account(&alice, 900_000_000, 1, Some(0)); + + let action = Action::XCMP { + destination: destination.clone(), + schedule_fee: destination, + execution_fee: Box::new(AssetPayment { + asset_location: NATIVE_LOCATION.into(), + amount: 10, + }), + encoded_call: vec![3, 4, 5], + encoded_call_weight: Weight::from_parts(100_000, 0), + overall_weight: Weight::from_parts(200_000, 0), + schedule_as: None, + instruction_sequence: InstructionSequence::PayThroughSovereignAccount, + }; + + let result = ::FeeHandler::pay_checked_fees_for( + &alice, + &action, + 1, + || { + has_callback_run = true; + Ok("called") + }, + ); + assert_eq!(result.expect("success"), "called"); + assert!(has_callback_run); + }) + } + + #[test] + fn call_pay_checked_fees_for_with_normal_flow_and_foreign_schedule_fee_will_throw_insufficent_balance( + ) { + new_test_ext(0).execute_with(|| { + let destination = Location::new(1, X1([Parachain(PARA_ID)].into())); + let alice = AccountId32::new(ALICE); + fund_account(&alice, 900_000_000, 1, Some(0)); + + let action = Action::XCMP { + destination: destination.clone(), 
+ schedule_fee: destination, + execution_fee: Box::new(AssetPayment { + asset_location: NATIVE_LOCATION.into(), + amount: 10, + }), + encoded_call: vec![3, 4, 5], + encoded_call_weight: Weight::from_parts(100_000, 0), + overall_weight: Weight::from_parts(200_000, 0), + schedule_as: None, + instruction_sequence: InstructionSequence::PayThroughSovereignAccount, + }; + + let result = ::FeeHandler::pay_checked_fees_for( + &alice, + &action, + 1, + || Ok(()), + ); + assert_err!(result, Error::::InsufficientBalance); + }) + } + + #[test] + fn call_pay_checked_fees_for_with_normal_flow_and_insufficent_execution_fee_will_fail() { + new_test_ext(0).execute_with(|| { + let destination = Location::new(1, X1([Parachain(PARA_ID)].into())); + let alice = AccountId32::new(ALICE); + fund_account(&alice, 900_000_000, 1, Some(0)); + + let action = Action::XCMP { + destination: destination.clone(), + schedule_fee: NATIVE_LOCATION, + execution_fee: Box::new(AssetPayment { + asset_location: destination.into(), + amount: 10, + }), + encoded_call: vec![3, 4, 5], + encoded_call_weight: Weight::from_parts(100_000, 0), + overall_weight: Weight::from_parts(200_000, 0), + schedule_as: None, + instruction_sequence: InstructionSequence::PayThroughSovereignAccount, + }; + + let result = ::FeeHandler::pay_checked_fees_for( + &alice, + &action, + 1, + || Ok(()), + ); + assert_err!(result, Error::::InsufficientBalance); + }) + } + + #[test] + fn call_pay_checked_fees_for_with_alternate_flow_and_no_execution_fee_success() { + new_test_ext(0).execute_with(|| { + let destination = Location::new(1, X1([Parachain(PARA_ID)].into())); + let alice = AccountId32::new(ALICE); + let mut has_callback_run = false; + fund_account(&alice, 900_000_000, 1, Some(0)); + + let action = Action::XCMP { + destination: destination.clone(), + schedule_fee: NATIVE_LOCATION, + execution_fee: Box::new(AssetPayment { + asset_location: destination.into(), + amount: 10, + }), + encoded_call: vec![3, 4, 5], + encoded_call_weight: Weight::from_parts(100_000, 0), + overall_weight: Weight::from_parts(200_000, 0), + schedule_as: None, + instruction_sequence: InstructionSequence::PayThroughRemoteDerivativeAccount, + }; + + let result = ::FeeHandler::pay_checked_fees_for( + &alice, + &action, + 1, + || { + has_callback_run = true; + Ok("called") + }, + ); + assert_eq!(result.expect("success"), "called"); + assert!(has_callback_run); + }) + } + + #[test] + fn errors_when_not_enough_funds_for_fee() { + new_test_ext(0).execute_with(|| { + let alice = AccountId32::new(ALICE); + let call: ::RuntimeCall = + frame_system::Call::remark_with_event { remark: vec![50] }.into(); + let result = ::FeeHandler::pay_checked_fees_for( + &alice, + &Action::DynamicDispatch { + encoded_call: call.encode(), + }, + 1, + || Ok(()), + ); + assert_err!(result, Error::::InsufficientBalance); + }) + } + + #[test] + fn does_not_charge_fees_when_prereq_errors() { + new_test_ext(0).execute_with(|| { + let alice = AccountId32::new(ALICE); + fund_account(&alice, 900_000_000, 1, Some(0)); + + let starting_funds = Balances::free_balance(alice.clone()); + let call: ::RuntimeCall = + frame_system::Call::remark_with_event { remark: vec![50] }.into(); + + let result = ::FeeHandler::pay_checked_fees_for::<(), _>( + &alice, + &Action::DynamicDispatch { + encoded_call: call.encode(), + }, + 1, + || Err("error".into()), + ); + assert_err!(result, DispatchError::Other("error")); + assert_eq!(starting_funds, Balances::free_balance(alice)) + }) + } +} diff --git a/pallets/automation-time/src/lib.rs 
b/pallets/automation-time/src/lib.rs new file mode 100644 index 000000000..0c30f09db --- /dev/null +++ b/pallets/automation-time/src/lib.rs @@ -0,0 +1,1483 @@ +// This file is part of Ava Protocol. + +// Copyright (C) 2022 Ava Protocol +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Automation time pallet +//! +//! This pallet allows a user to schedule tasks. Tasks can be scheduled for any whole SlotSizeSeconds in the future. +//! In order to run tasks this pallet consumes up to a certain amount of weight during `on_initialize`. +//! +//! The pallet supports the following tasks: +//! * On-chain events with custom text +//! + +#![cfg_attr(not(feature = "std"), no_std)] +pub use pallet::*; + +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + +// mod benchmarking; +pub mod weights; + +mod fees; +pub use fees::*; + +// mod autocompounding; +// pub use autocompounding::*; + +mod types; +pub use types::*; + +use core::convert::TryInto; +use frame_support::{ + dispatch::{GetDispatchInfo, PostDispatchInfo}, + pallet_prelude::*, + storage::{ + with_transaction, + TransactionOutcome::{Commit, Rollback}, + }, + traits::{Contains, Currency, IsSubType, OriginTrait}, + weights::constants::WEIGHT_REF_TIME_PER_SECOND, +}; +use frame_system::pallet_prelude::*; +use orml_traits::{location::Reserve, FixedConversionRateProvider, MultiCurrency}; +use parity_scale_codec::Decode; +// use pallet_parachain_staking::DelegatorActions; +use ava_protocol_primitives::EnsureProxy; +use pallet_timestamp::{self as timestamp}; +pub use pallet_xcmp_handler::InstructionSequence; +use pallet_xcmp_handler::XcmpTransactor; +use scale_info::{prelude::format, TypeInfo}; +use sp_runtime::{ + traits::{CheckedConversion, Convert, Dispatchable, SaturatedConversion, Saturating}, + ArithmeticError, DispatchError, Perbill, +}; +use sp_std::{boxed::Box, collections::btree_map::BTreeMap, vec, vec::Vec}; +use staging_xcm::{latest::prelude::*, VersionedLocation}; +pub use weights::WeightInfo; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + + pub type AccountOf<T> = <T as frame_system::Config>::AccountId; + pub type BalanceOf<T> = + <<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance; + pub type MultiBalanceOf<T> = <<T as Config>::MultiCurrency as MultiCurrency< + <T as frame_system::Config>::AccountId, + >>::Balance; + + pub type TaskIdV2 = Vec<u8>; + + pub type AccountTaskId<T> = (AccountOf<T>, TaskIdV2); + pub type ActionOf<T> = Action<AccountOf<T>>; + pub type TaskOf<T> = types::Task<AccountOf<T>>; + pub type MissedTaskV2Of<T> = MissedTaskV2<AccountOf<T>, TaskIdV2>; + pub type ScheduledTasksOf<T> = ScheduledTasks<AccountOf<T>, TaskIdV2>; + pub type MultiCurrencyId<T> = <<T as Config>::MultiCurrency as MultiCurrency< + <T as frame_system::Config>::AccountId, + >>::CurrencyId; + + #[pallet::config] + pub trait Config: frame_system::Config + pallet_timestamp::Config { + type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>; + + /// Weight information for the extrinsics in this module. + type WeightInfo: WeightInfo; + + /// The maximum number of tasks that can be scheduled for a time slot.
+ #[pallet::constant] + type MaxTasksPerSlot: Get; + + /// The maximum number of times that a task can be scheduled for. + #[pallet::constant] + type MaxExecutionTimes: Get; + + /// The farthest out a task can be scheduled. + #[pallet::constant] + type MaxScheduleSeconds: Get; + + /// The maximum weight per block. + #[pallet::constant] + type MaxBlockWeight: Get; + + /// The maximum percentage of weight per block used for scheduled tasks. + #[pallet::constant] + type MaxWeightPercentage: Get; + + /// The maximum supported execution weight per automation slot + #[pallet::constant] + type MaxWeightPerSlot: Get; + + /// The minimum time interval tasks could schedule for. For example, if the value is 600, then only inputs that are multiples of 600 are allowed. In other words, tasks can only be scheduled at 0, 10, 20 ... minutes of each hour. + #[pallet::constant] + type SlotSizeSeconds: Get; + + /// The maximum percentage of weight per block used for scheduled tasks. + #[pallet::constant] + type UpdateQueueRatio: Get; + + #[pallet::constant] + type ExecutionWeightFee: Get>; + + /// The Currency type for interacting with balances + type Currency: Currency; + + /// The MultiCurrency type for interacting with balances + type MultiCurrency: MultiCurrency; + + /// The currencyIds that our chain supports. + type CurrencyId: Parameter + + Member + + Copy + + MaybeSerializeDeserialize + + Ord + + TypeInfo + + MaxEncodedLen + + From> + + Into> + + From; + + /// Utility for sending XCM messages + type XcmpTransactor: XcmpTransactor; + + /// Converts CurrencyId to Multiloc + type CurrencyIdConvert: Convert> + + Convert>; + + /// Converts between comparable currencies + type FeeConversionRateProvider: FixedConversionRateProvider; + + /// Handler for fees + type FeeHandler: HandleFees; + + // type DelegatorActions: DelegatorActions>; + + /// The overarching call type. + type RuntimeCall: Parameter + + Dispatchable + + GetDispatchInfo + + From> + + IsSubType> + + IsType<::RuntimeCall> + + From>; + + type ScheduleAllowList: Contains<::RuntimeCall>; + + /// Ensure proxy + type EnsureProxy: ava_protocol_primitives::EnsureProxy; + + /// This chain's Universal Location. + type UniversalLocation: Get; + + /// The way to retreave the reserve of a Asset. This can be + /// configured to accept absolute or relative paths for self tokens + type ReserveProvider: Reserve; + + /// Self chain location. + #[pallet::constant] + type SelfLocation: Get; + } + + const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); + + #[pallet::pallet] + #[pallet::without_storage_info] + #[pallet::storage_version(STORAGE_VERSION)] + pub struct Pallet(_); + + #[pallet::storage] + #[pallet::getter(fn get_scheduled_tasks)] + pub type ScheduledTasksV3 = + StorageMap<_, Twox64Concat, UnixTime, ScheduledTasksOf>; + + #[pallet::storage] + #[pallet::getter(fn get_account_task)] + pub type AccountTasks = + StorageDoubleMap<_, Twox64Concat, AccountOf, Twox64Concat, TaskIdV2, TaskOf>; + + #[pallet::storage] + #[pallet::getter(fn get_task_queue)] + pub type TaskQueueV2 = StorageValue<_, Vec>, ValueQuery>; + + #[pallet::storage] + #[pallet::getter(fn get_missed_queue)] + pub type MissedQueueV2 = StorageValue<_, Vec>, ValueQuery>; + + #[pallet::storage] + #[pallet::getter(fn get_last_slot)] + // NOTE: The 2 UnixTime stamps represent (last_time_slot, last_missed_slot). + // `last_time_slot` represents the last time slot that the task queue was updated. 
+    // `last_missed_slot` represents the last scheduled slot where the missed queue has checked for missed tasks.
+    pub type LastTimeSlot<T: Config> = StorageValue<_, (UnixTime, UnixTime)>;
+
+    #[pallet::storage]
+    #[pallet::getter(fn is_shutdown)]
+    pub type Shutdown<T: Config> = StorageValue<_, bool, ValueQuery>;
+
+    #[pallet::error]
+    #[derive(PartialEq)]
+    pub enum Error<T> {
+        /// Time in seconds must be a multiple of SlotSizeSeconds.
+        InvalidTime,
+        /// Time must be in the future.
+        PastTime,
+        /// Time cannot be too far in the future.
+        TimeTooFarOut,
+        /// There can be no duplicate tasks.
+        DuplicateTask,
+        /// Time slot is full. No more tasks can be scheduled for this time.
+        TimeSlotFull,
+        /// The task does not exist.
+        TaskDoesNotExist,
+        /// The task schedule_as does not match.
+        TaskScheduleAsNotMatch,
+        /// Block time not set.
+        BlockTimeNotSet,
+        /// Insufficient balance to pay execution fee.
+        InsufficientBalance,
+        /// Account liquidity restrictions prevent withdrawal.
+        LiquidityRestrictions,
+        /// Too many execution times provided.
+        TooManyExecutionsTimes,
+        /// The call can no longer be decoded.
+        CallCannotBeDecoded,
+        /// Inconvertible currency ID.
+        IncoveribleCurrencyId,
+        /// The version of the `VersionedMultiLocation` value used is not able
+        /// to be interpreted.
+        BadVersion,
+        /// The fee payment asset location is not supported.
+        UnsupportedFeePayment,
+        /// Multilocation cannot be reanchored.
+        CannotReanchor,
+        /// Invalid asset location.
+        InvalidAssetLocation,
+    }
+
+    #[pallet::event]
+    #[pallet::generate_deposit(pub(super) fn deposit_event)]
+    pub enum Event<T: Config> {
+        /// Schedule task success.
+        TaskScheduled {
+            who: AccountOf<T>,
+            task_id: TaskIdV2,
+            schedule_as: Option<AccountOf<T>>,
+            encoded_call: Option<Vec<u8>>,
+        },
+        /// Cancelled a task.
+        TaskCancelled {
+            who: AccountOf<T>,
+            task_id: TaskIdV2,
+        },
+        /// A Task was not found.
+        TaskNotFound {
+            who: AccountOf<T>,
+            task_id: TaskIdV2,
+        },
+        /// The task could not be run at the scheduled time.
+        TaskMissed {
+            who: AccountOf<T>,
+            task_id: TaskIdV2,
+            execution_time: UnixTime,
+        },
+        /// A recurring task was rescheduled.
+        TaskRescheduled {
+            who: AccountOf<T>,
+            task_id: TaskIdV2,
+            schedule_as: Option<AccountOf<T>>,
+        },
+        /// A recurring task was not rescheduled.
+        TaskNotRescheduled {
+            who: AccountOf<T>,
+            task_id: TaskIdV2,
+            error: DispatchError,
+        },
+        /// A recurring task attempted but failed to be rescheduled.
+        TaskRescheduleFailed {
+            who: AccountOf<T>,
+            task_id: TaskIdV2,
+            error: DispatchError,
+        },
+        TaskCompleted {
+            who: AccountOf<T>,
+            task_id: TaskIdV2,
+        },
+        TaskTriggered {
+            who: AccountOf<T>,
+            task_id: TaskIdV2,
+            condition: BTreeMap<Vec<u8>, Vec<u8>>,
+        },
+        TaskExecuted {
+            who: AccountOf<T>,
+            task_id: TaskIdV2,
+        },
+        TaskExecutionFailed {
+            who: AccountOf<T>,
+            task_id: TaskIdV2,
+            error: DispatchError,
+        },
+    }
+
+    #[pallet::hooks]
+    impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
+        fn on_initialize(_: BlockNumberFor<T>) -> Weight {
+            if Self::is_shutdown() {
+                return T::DbWeight::get().reads(1u64);
+            }
+
+            let max_weight: Weight = Weight::from_parts(
+                T::MaxWeightPercentage::get().mul_floor(T::MaxBlockWeight::get()),
+                0,
+            );
+
+            Self::trigger_tasks(max_weight)
+        }
+    }
+
+    #[pallet::call]
+    impl<T: Config> Pallet<T> {
+        /// Schedule a task through XCMP to fire an XCMP message with a provided call.
+        ///
+        /// Before the task can be scheduled the task must pass validation checks.
+ /// * The transaction is signed + /// * The times are valid + /// * The given asset location is supported + /// + /// # Parameters + /// * `schedule`: The triggering rules for recurring task or the list of unix standard times in seconds for when the task should run. + /// * `destination`: Destination the XCMP call will be sent to. + /// * `schedule_fee`: The payment asset location required for scheduling automation task. + /// * `execution_fee`: The fee will be paid for XCMP execution. + /// * `encoded_call`: Call that will be sent via XCMP to the parachain id provided. + /// * `encoded_call_weight`: Required weight at most the provided call will take. + /// * `overall_weight`: The overall weight in which fees will be paid for XCM instructions. + /// * `instruction_sequence`: The instruction sequence for the XCM call. + /// * `schedule_as`: The real task executor. If it is None, the caller will be the executor. + /// + /// # Errors + /// * `InvalidTime`: Time in seconds must be a multiple of SlotSizeSeconds. + /// * `PastTime`: Time must be in the future. + /// * `DuplicateTask`: There can be no duplicate tasks. + /// * `TimeTooFarOut`: Execution time or frequency are past the max time horizon. + /// * `TimeSlotFull`: Time slot is full. No more tasks can be scheduled for this time. + /// * `UnsupportedFeePayment`: Unsupported fee payment. + /// * `InvalidAssetLocation` Invalid asset location. + #[pallet::call_index(1)] + #[pallet::weight( + ::WeightInfo::schedule_xcmp_task_full(schedule.number_of_executions()) + .saturating_add(T::DbWeight::get().reads(if schedule_as.is_some() { 1 } else { 0 })) + )] + pub fn schedule_xcmp_task( + origin: OriginFor, + schedule: ScheduleParam, + destination: Box, + schedule_fee: Box, + execution_fee: Box, + encoded_call: Vec, + encoded_call_weight: Weight, + overall_weight: Weight, + instruction_sequence: InstructionSequence, + schedule_as: Option, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + + // Make sure the owner is the proxy account of the user account. + if let Some(schedule_as_account) = schedule_as.clone() { + T::EnsureProxy::ensure_ok(schedule_as_account, who.clone())?; + } + + let destination_location = + Location::try_from(*destination.clone()).map_err(|()| Error::::BadVersion)?; + let schedule_fee_location = + Location::try_from(*schedule_fee.clone()).map_err(|()| Error::::BadVersion)?; + + let execution_fee_payment: AssetPayment = *execution_fee.clone(); + let execution_fee_location = + Location::try_from(execution_fee_payment.clone().asset_location) + .map_err(|()| Error::::BadVersion)?; + + Self::ensure_supported_execution_fee_location( + &execution_fee_location, + &destination_location, + )?; + + let action = Action::XCMP { + destination: destination_location, + schedule_fee: schedule_fee_location, + execution_fee: Box::new(execution_fee_payment), + encoded_call: encoded_call.clone(), + encoded_call_weight, + overall_weight, + schedule_as: schedule_as.clone(), + instruction_sequence, + }; + + // Convert the call into a runtime call + let call: ::RuntimeCall = Call::schedule_xcmp_task { + schedule: schedule.clone(), + destination, + schedule_fee, + execution_fee, + encoded_call, + encoded_call_weight, + overall_weight, + instruction_sequence, + schedule_as, + } + .into(); + + // Schedule the task. + Self::schedule_task_with_event( + action, + who, + schedule.validated_into::()?, + vec![], + Some(call.encode()), + )?; + + Ok(()) + } + + /// Schedule a task that will dispatch a call. 
+        /// ** This is currently limited to calls from the System and Balances pallets.
+        ///
+        /// # Parameters
+        /// * `execution_times`: The list of unix standard times in seconds for when the task should run.
+        /// * `call`: The call that will be dispatched.
+        ///
+        /// # Errors
+        /// * `InvalidTime`: Execution time and frequency must be a multiple of SlotSizeSeconds.
+        /// * `PastTime`: Time must be in the future.
+        /// * `DuplicateTask`: There can be no duplicate tasks.
+        /// * `TimeSlotFull`: Time slot is full. No more tasks can be scheduled for this time.
+        /// * `TimeTooFarOut`: Execution time or frequency are past the max time horizon.
+        #[pallet::call_index(3)]
+        #[pallet::weight(<T as Config>::WeightInfo::schedule_dynamic_dispatch_task_full(schedule.number_of_executions()))]
+        pub fn schedule_dynamic_dispatch_task(
+            origin: OriginFor<T>,
+            schedule: ScheduleParam,
+            call: Box<<T as Config>::RuntimeCall>,
+        ) -> DispatchResult {
+            let who = ensure_signed(origin)?;
+
+            let encoded_call = call.encode();
+            let action = Action::DynamicDispatch {
+                encoded_call: encoded_call.clone(),
+            };
+            let schedule = schedule.validated_into::<T>()?;
+
+            Self::schedule_task_with_event(action, who, schedule, vec![], Some(encoded_call))?;
+
+            Ok(())
+        }
+
+        /// Cancel a task.
+        ///
+        /// Tasks can only be cancelled by their owners.
+        ///
+        /// # Parameters
+        /// * `task_id`: The id of the task.
+        ///
+        /// # Errors
+        /// * `TaskDoesNotExist`: The task does not exist.
+        #[pallet::call_index(4)]
+        #[pallet::weight(<T as Config>::WeightInfo::cancel_scheduled_task_full())]
+        pub fn cancel_task(origin: OriginFor<T>, task_id: TaskIdV2) -> DispatchResult {
+            let who = ensure_signed(origin)?;
+
+            AccountTasks::<T>::get(who, task_id.clone())
+                .ok_or(Error::<T>::TaskDoesNotExist)
+                .map(|task| Self::remove_task(task_id.clone(), task))?;
+
+            Ok(())
+        }
+
+        /// Sudo can force cancel a task.
+        ///
+        /// # Parameters
+        /// * `owner_id`: The owner of the task.
+        /// * `task_id`: The id of the task.
+        ///
+        /// # Errors
+        /// * `TaskDoesNotExist`: The task does not exist.
+        #[pallet::call_index(5)]
+        #[pallet::weight(<T as Config>::WeightInfo::force_cancel_scheduled_task_full())]
+        pub fn force_cancel_task(
+            origin: OriginFor<T>,
+            owner_id: AccountOf<T>,
+            task_id: TaskIdV2,
+        ) -> DispatchResult {
+            ensure_root(origin)?;
+
+            AccountTasks::<T>::get(owner_id, task_id.clone())
+                .ok_or(Error::<T>::TaskDoesNotExist)
+                .map(|task| Self::remove_task(task_id.clone(), task))?;
+
+            Ok(())
+        }
+
+        /// Cancel a task by schedule_as.
+        ///
+        /// # Parameters
+        /// * `schedule_as`: The schedule_as account of the task.
+        /// * `task_id`: The id of the task.
+        ///
+        /// # Errors
+        /// * `TaskDoesNotExist`: The task does not exist.
+        /// * `TaskScheduleAsNotMatch`: The schedule_as account of the task does not match.
+        #[pallet::call_index(6)]
+        #[pallet::weight(<T as Config>::WeightInfo::cancel_task_with_schedule_as_full())]
+        pub fn cancel_task_with_schedule_as(
+            origin: OriginFor<T>,
+            owner_id: AccountOf<T>,
+            task_id: TaskIdV2,
+        ) -> DispatchResult {
+            let who = ensure_signed(origin)?;
+
+            let task = AccountTasks::<T>::get(owner_id, task_id.clone())
+                .ok_or(Error::<T>::TaskDoesNotExist)?;
+
+            if !matches!(task.action, Action::XCMP { schedule_as: Some(ref s), .. } if s == &who) {
+                return Err(Error::<T>::TaskScheduleAsNotMatch.into());
+            }
+
+            Self::remove_task(task_id, task);
+            Ok(())
+        }
+    }
+
+    impl<T: Config> Pallet<T> {
+        /// Based on the block time, return the time slot.
+        ///
+        /// In order to do this we:
+        /// * Get the most recent timestamp from the block.
+        /// * Convert the ms unix timestamp to seconds.
+        /// * Bring the timestamp down to the last whole SlotSizeSeconds.
+        pub fn get_current_time_slot() -> Result<UnixTime, DispatchError> {
+            let now = <timestamp::Pallet<T>>::get()
+                .checked_into::<u64>()
+                .ok_or(ArithmeticError::Overflow)?;
+
+            if now == 0 {
+                Err(Error::<T>::BlockTimeNotSet)?
+            }
+
+            let now = now.checked_div(1000).ok_or(ArithmeticError::Overflow)?;
+            let diff_to_slot = now
+                .checked_rem(T::SlotSizeSeconds::get())
+                .ok_or(ArithmeticError::Overflow)?;
+            Ok(now
+                .checked_sub(diff_to_slot)
+                .ok_or(ArithmeticError::Overflow)?)
+        }
+
+        /// Checks to see if the scheduled time is valid.
+        ///
+        /// In order for a time to be valid it must:
+        /// - Be a multiple of SlotSizeSeconds
+        /// - Be in the future
+        /// - Not be more than MaxScheduleSeconds out
+        pub fn is_valid_time(scheduled_time: UnixTime) -> DispatchResult {
+            #[cfg(feature = "dev-queue")]
+            if scheduled_time == 0 {
+                return Ok(());
+            }
+
+            let remainder = scheduled_time
+                .checked_rem(T::SlotSizeSeconds::get())
+                .ok_or(ArithmeticError::Overflow)?;
+            if remainder != 0 {
+                Err(<Error<T>>::InvalidTime)?;
+            }
+
+            let current_time_slot = Self::get_current_time_slot()?;
+            if scheduled_time <= current_time_slot {
+                Err(<Error<T>>::PastTime)?;
+            }
+
+            let max_schedule_time = current_time_slot
+                .checked_add(T::MaxScheduleSeconds::get())
+                .ok_or(ArithmeticError::Overflow)?;
+
+            if scheduled_time > max_schedule_time {
+                Err(Error::<T>::TimeTooFarOut)?;
+            }
+
+            Ok(())
+        }
+
+        /// Cleans the execution times vector by removing duplicates and sorting in ascending order.
+        pub fn clean_execution_times_vector(execution_times: &mut Vec<UnixTime>) {
+            execution_times.sort_unstable();
+            execution_times.dedup();
+        }
+
+        /// Trigger tasks for the block time.
+        ///
+        /// Complete as many tasks as possible given the maximum weight.
+        pub fn trigger_tasks(max_weight: Weight) -> Weight {
+            let mut weight_left: Weight = max_weight;
+
+            // The last_missed_slot might not be caught up within just 1 block.
+            // It might take multiple blocks to fully catch up, so we limit update to a max weight.
+            let max_update_weight: Weight = Weight::from_parts(
+                T::UpdateQueueRatio::get().mul_floor(weight_left.ref_time()),
+                0,
+            );
+
+            let update_weight = Self::update_task_queue(max_update_weight);
+
+            weight_left = weight_left.saturating_sub(update_weight);
+
+            // need to calculate the weight of running just 1 task below.
+            let run_task_weight = <T as Config>::WeightInfo::run_tasks_many_found(1)
+                .saturating_add(T::DbWeight::get().reads(1u64))
+                .saturating_add(T::DbWeight::get().writes(1u64));
+
+            if weight_left.ref_time() < run_task_weight.ref_time() {
+                return weight_left;
+            }
+
+            // run as many scheduled tasks as we can
+            let task_queue = Self::get_task_queue();
+            weight_left = weight_left.saturating_sub(T::DbWeight::get().reads(1u64));
+            if !task_queue.is_empty() {
+                let (tasks_left, new_weight_left) = Self::run_tasks(task_queue, weight_left);
+                TaskQueueV2::<T>::put(tasks_left);
+                weight_left = new_weight_left.saturating_sub(T::DbWeight::get().writes(1u64));
+            }
+
+            // if there is weight left we need to handle the missed tasks
+            let run_missed_task_weight = <T as Config>::WeightInfo::run_missed_tasks_many_found(1)
+                .saturating_add(T::DbWeight::get().reads(1u64))
+                .saturating_add(T::DbWeight::get().writes(1u64));
+            if weight_left.ref_time() >= run_missed_task_weight.ref_time() {
+                let missed_queue = Self::get_missed_queue();
+                weight_left = weight_left.saturating_sub(T::DbWeight::get().reads(1u64));
+                if !missed_queue.is_empty() {
+                    let (tasks_left, new_weight_left) =
+                        Self::run_missed_tasks(missed_queue, weight_left);
+
+                    MissedQueueV2::<T>::put(tasks_left);
+                    weight_left = new_weight_left.saturating_sub(T::DbWeight::get().writes(1u64));
+                }
+            }
+
+            max_weight.saturating_sub(weight_left)
+        }
+
+        /// Update the task queue.
+        ///
+        /// This function checks to see if we are in a new time slot, and if so it updates the task queue and missed queue by doing the following.
+        /// 1. (update_scheduled_task_queue) If new slot, append the current task queue to the missed queue and remove tasks from task queue.
+        /// 2. (update_scheduled_task_queue) Move all tasks from the new slot into the task queue and remove the slot from Scheduled tasks map.
+        /// 3. (update_missed_queue) If we skipped any time slots (due to an outage) move those tasks to the missed queue.
+        /// 4. (update_missed_queue) Remove all missed time slots that were moved to missed queue from the Scheduled tasks map.
+        pub fn update_task_queue(allotted_weight: Weight) -> Weight {
+            let mut total_weight = <T as Config>::WeightInfo::update_task_queue_overhead();
+
+            let current_time_slot = match Self::get_current_time_slot() {
+                Ok(time_slot) => time_slot,
+                Err(_) => return total_weight,
+            };
+
+            if let Some((last_time_slot, last_missed_slot)) = Self::get_last_slot() {
+                let missed_queue_allotted_weight = allotted_weight
+                    .saturating_sub(T::DbWeight::get().reads(1u64))
+                    .saturating_sub(T::DbWeight::get().writes(1u64))
+                    .saturating_sub(<T as Config>::WeightInfo::update_scheduled_task_queue());
+                let (updated_last_time_slot, scheduled_queue_update_weight) =
+                    Self::update_scheduled_task_queue(current_time_slot, last_time_slot);
+                let (updated_last_missed_slot, missed_queue_update_weight) =
+                    Self::update_missed_queue(
+                        current_time_slot,
+                        last_missed_slot,
+                        missed_queue_allotted_weight,
+                    );
+
+                LastTimeSlot::<T>::put((updated_last_time_slot, updated_last_missed_slot));
+                total_weight = total_weight
+                    .saturating_add(missed_queue_update_weight)
+                    .saturating_add(scheduled_queue_update_weight)
+                    .saturating_add(T::DbWeight::get().reads(1u64));
+            } else {
+                LastTimeSlot::<T>::put((current_time_slot, current_time_slot));
+                total_weight = total_weight
+                    .saturating_add(T::DbWeight::get().writes(1u64))
+                    .saturating_add(T::DbWeight::get().reads(1u64));
+            }
+
+            total_weight
+        }
+
+        /// Update the task queue with scheduled tasks for the current slot
+        ///
+        /// 1.
If new slot, append the current task queue to the missed queue and remove tasks from task queue. + /// 2. Move all tasks from the new slot into the task queue and remove the slot from Scheduled tasks map. + pub fn update_scheduled_task_queue( + current_time_slot: u64, + last_time_slot: u64, + ) -> (u64, Weight) { + if current_time_slot != last_time_slot { + let missed_tasks = Self::get_task_queue(); + let mut missed_queue = Self::get_missed_queue(); + for (account_id, task_id) in missed_tasks { + let new_missed_task = + MissedTaskV2Of::::new(account_id, task_id, last_time_slot); + missed_queue.push(new_missed_task); + } + MissedQueueV2::::put(missed_queue); + // move current time slot to task queue or clear the task queue + if let Some(ScheduledTasksOf:: { + tasks: account_task_ids, + .. + }) = Self::get_scheduled_tasks(current_time_slot) + { + TaskQueueV2::::put(account_task_ids); + ScheduledTasksV3::::remove(current_time_slot); + } else { + let empty_queue: Vec> = vec![]; + TaskQueueV2::::put(empty_queue); + } + } + let weight_used = ::WeightInfo::update_scheduled_task_queue(); + (current_time_slot, weight_used) + } + + /// Checks if append_to_missed_tasks needs to run and then runs and measures weight as needed + pub fn update_missed_queue( + current_time_slot: u64, + last_missed_slot: u64, + allotted_weight: Weight, + ) -> (u64, Weight) { + if current_time_slot != last_missed_slot { + // will need to move missed time slots into missed queue + let (append_weight, missed_slots_moved) = Self::append_to_missed_tasks( + current_time_slot, + last_missed_slot, + allotted_weight, + ); + + let last_missed_slot_tracker = last_missed_slot + .saturating_add(missed_slots_moved.saturating_mul(T::SlotSizeSeconds::get())); + let used_weight = append_weight; + (last_missed_slot_tracker, used_weight) + } else { + (last_missed_slot, Weight::zero()) + } + } + + /// Checks each previous time slots to move any missed tasks into the missed_queue + /// + /// 1. If we skipped any time slots (due to an outage) move those tasks to the missed queue. + /// 2. Remove all missed time slots that were moved to missed queue from the Scheduled tasks map. + pub fn append_to_missed_tasks( + current_time_slot: UnixTime, + last_missed_slot: UnixTime, + mut allotted_weight: Weight, + ) -> (Weight, u64) { + // will need to move task queue into missed queue + let mut missed_tasks = vec![]; + let mut diff = (current_time_slot.saturating_sub(last_missed_slot) + / T::SlotSizeSeconds::get()) + .saturating_sub(1); + for i in 0..diff { + if allotted_weight.ref_time() + < ::WeightInfo::shift_missed_tasks().ref_time() + { + diff = i; + break; + } + let mut slot_missed_tasks = Self::shift_missed_tasks(last_missed_slot, i); + missed_tasks.append(&mut slot_missed_tasks); + allotted_weight = + allotted_weight.saturating_sub(::WeightInfo::shift_missed_tasks()); + } + // Update the missed queue + let mut missed_queue = Self::get_missed_queue(); + missed_queue.append(&mut missed_tasks); + MissedQueueV2::::put(missed_queue); + + let weight = ::WeightInfo::append_to_missed_tasks(diff.saturated_into()); + (weight, diff) + } + + /// Grabs all of the missed tasks from a time slot. + /// The time slot to grab missed tasks is calculated given: + /// 1. last missed slot that was stored + /// 2. 
the number of slots that it should skip after that + pub fn shift_missed_tasks( + last_missed_slot: UnixTime, + number_of_missed_slots: u64, + ) -> Vec> { + let mut tasks = vec![]; + let seconds_in_slot = T::SlotSizeSeconds::get(); + let shift = seconds_in_slot.saturating_mul(number_of_missed_slots + 1); + let new_time_slot = last_missed_slot.saturating_add(shift); + if let Some(ScheduledTasksOf:: { + tasks: account_task_ids, + .. + }) = Self::get_scheduled_tasks(new_time_slot) + { + ScheduledTasksV3::::remove(new_time_slot); + for (account_id, task_id) in account_task_ids { + let new_missed_task = + MissedTaskV2Of::::new(account_id, task_id, new_time_slot); + tasks.push(new_missed_task); + } + } + tasks + } + + /// Runs as many tasks as the weight allows from the provided vec of task_ids. + /// + /// Returns a vec with the tasks that were not run and the remaining weight. + pub fn run_tasks( + mut account_task_ids: Vec>, + mut weight_left: Weight, + ) -> (Vec>, Weight) { + let mut consumed_task_index: usize = 0; + + let time_slot = Self::get_current_time_slot(); + if time_slot.is_err() { + return (account_task_ids, weight_left); + } + let time_slot = time_slot.unwrap(); + + for (account_id, task_id) in account_task_ids.iter() { + consumed_task_index.saturating_inc(); + let action_weight = match AccountTasks::::get(account_id.clone(), task_id) { + None => { + Self::deposit_event(Event::TaskNotFound { + who: account_id.clone(), + task_id: task_id.clone(), + }); + ::WeightInfo::run_tasks_many_missing(1) + } + Some(task) => { + let mut condition: BTreeMap, Vec> = BTreeMap::new(); + condition.insert("type".as_bytes().to_vec(), "time".as_bytes().to_vec()); + condition.insert( + "timestamp".as_bytes().to_vec(), + format!("{}", time_slot).into_bytes(), + ); + + Self::deposit_event(Event::TaskTriggered { + who: account_id.clone(), + task_id: task_id.clone(), + condition, + }); + + let (task_action_weight, dispatch_error) = match task.action.clone() { + Action::XCMP { + destination, + execution_fee, + schedule_as, + encoded_call, + encoded_call_weight, + overall_weight, + instruction_sequence, + .. + } => Self::run_xcmp_task( + destination, + schedule_as.unwrap_or_else(|| task.owner_id.clone()), + *execution_fee, + encoded_call, + encoded_call_weight, + overall_weight, + instruction_sequence, + ), + Action::DynamicDispatch { encoded_call } => { + Self::run_dynamic_dispatch_action( + task.owner_id.clone(), + encoded_call, + ) + } + }; + + // If an error occurs during the task execution process, the TaskExecutionFailed event will be emitted; + // Otherwise, the TaskExecuted event will be thrown. 
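+                        // Both paths below then flow into `handle_task_post_processing`,
+                        // which decrements a Fixed schedule's remaining executions or
+                        // reschedules a Recurring task, using `dispatch_error` to decide
+                        // whether the next occurrence should be aborted.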
+ if let Some(err) = dispatch_error { + Self::deposit_event(Event::::TaskExecutionFailed { + who: task.owner_id.clone(), + task_id: task_id.clone(), + error: err, + }); + } else { + Self::deposit_event(Event::::TaskExecuted { + who: task.owner_id.clone(), + task_id: task_id.clone(), + }); + } + + Self::handle_task_post_processing(task_id.clone(), task, dispatch_error); + task_action_weight + .saturating_add(T::DbWeight::get().writes(1u64)) + .saturating_add(T::DbWeight::get().reads(1u64)) + } + }; + + weight_left = weight_left.saturating_sub(action_weight); + + if weight_left.ref_time() + < ::WeightInfo::run_tasks_many_found(1).ref_time() + { + break; + } + } + + if consumed_task_index == account_task_ids.len() { + (vec![], weight_left) + } else { + (account_task_ids.split_off(consumed_task_index), weight_left) + } + } + + /// Send events for as many missed tasks as the weight allows from the provided vec of task_ids. + /// + /// Returns a vec with the tasks that were not run and the remaining weight. + pub fn run_missed_tasks( + mut missed_tasks: Vec>, + mut weight_left: Weight, + ) -> (Vec>, Weight) { + let mut consumed_task_index: usize = 0; + for missed_task in missed_tasks.iter() { + consumed_task_index += 1; + + let action_weight = match AccountTasks::::get( + missed_task.owner_id.clone(), + missed_task.task_id.clone(), + ) { + None => { + Self::deposit_event(Event::TaskNotFound { + who: missed_task.owner_id.clone(), + task_id: missed_task.task_id.clone(), + }); + ::WeightInfo::run_missed_tasks_many_missing(1) + } + Some(task) => { + Self::deposit_event(Event::TaskMissed { + who: task.owner_id.clone(), + task_id: missed_task.task_id.clone(), + execution_time: missed_task.execution_time, + }); + Self::handle_task_post_processing(missed_task.task_id.clone(), task, None); + ::WeightInfo::run_missed_tasks_many_found(1) + } + }; + + weight_left = weight_left.saturating_sub(action_weight); + + if weight_left.ref_time() + < ::WeightInfo::run_missed_tasks_many_found(1).ref_time() + { + break; + } + } + + if consumed_task_index == missed_tasks.len() { + (vec![], weight_left) + } else { + (missed_tasks.split_off(consumed_task_index), weight_left) + } + } + + pub fn run_xcmp_task( + destination: Location, + caller: T::AccountId, + fee: AssetPayment, + encoded_call: Vec, + encoded_call_weight: Weight, + overall_weight: Weight, + flow: InstructionSequence, + ) -> (Weight, Option) { + let fee_asset_location = Location::try_from(fee.asset_location); + if fee_asset_location.is_err() { + return ( + ::WeightInfo::run_xcmp_task(), + Some(Error::::BadVersion.into()), + ); + } + let fee_asset_location = fee_asset_location.unwrap(); + + match T::XcmpTransactor::transact_xcm( + destination, + fee_asset_location, + fee.amount, + caller, + encoded_call, + encoded_call_weight, + overall_weight, + flow, + ) { + Ok(()) => (::WeightInfo::run_xcmp_task(), None), + Err(e) => (::WeightInfo::run_xcmp_task(), Some(e)), + } + } + + /// Attempt to decode and run the call. 
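+        ///
+        /// The call is decoded into the runtime's `RuntimeCall`, dispatched from a
+        /// `Signed` origin for `caller`, and filtered through `T::ScheduleAllowList`.
+        /// A minimal sketch of direct usage, assuming the `Test` mock runtime and
+        /// `ALICE` account defined in `mock.rs`:
+        ///
+        /// ```ignore
+        /// let call: <Test as frame_system::Config>::RuntimeCall =
+        ///     frame_system::Call::remark_with_event { remark: vec![1] }.into();
+        /// let (weight, error) = AutomationTime::run_dynamic_dispatch_action(
+        ///     AccountId32::new(ALICE),
+        ///     call.encode(),
+        /// );
+        /// assert!(error.is_none());
+        /// ```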
+ pub fn run_dynamic_dispatch_action( + caller: AccountOf, + encoded_call: Vec, + ) -> (Weight, Option) { + match ::RuntimeCall::decode(&mut &*encoded_call) { + Ok(scheduled_call) => { + let mut dispatch_origin: T::RuntimeOrigin = + frame_system::RawOrigin::Signed(caller).into(); + dispatch_origin.add_filter( + |call: &::RuntimeCall| { + T::ScheduleAllowList::contains(call) + }, + ); + + let call_weight = scheduled_call.get_dispatch_info().weight; + + let (maybe_actual_call_weight, result) = + match scheduled_call.dispatch(dispatch_origin) { + Ok(post_info) => (post_info.actual_weight, Ok(())), + Err(error_and_info) => ( + error_and_info.post_info.actual_weight, + Err(error_and_info.error), + ), + }; + + ( + maybe_actual_call_weight + .unwrap_or(call_weight) + .saturating_add( + ::WeightInfo::run_dynamic_dispatch_action(), + ), + result.err(), + ) + } + Err(_) => ( + ::WeightInfo::run_dynamic_dispatch_action_fail_decode(), + Some(Error::::CallCannotBeDecoded.into()), + ), + } + } + + /// Decrements task executions left. + /// If task is complete then removes task. If task not complete update task map. + /// A task has been completed if executions left equals 0. + fn decrement_task_and_remove_if_complete(task_id: TaskIdV2, mut task: TaskOf) { + match task.schedule { + Schedule::Fixed { + ref mut executions_left, + .. + } => { + *executions_left = executions_left.saturating_sub(1); + if *executions_left == 0 { + AccountTasks::::remove(task.owner_id.clone(), task_id.clone()); + Self::deposit_event(Event::TaskCompleted { + who: task.owner_id.clone(), + task_id, + }); + } else { + AccountTasks::::insert(task.owner_id.clone(), task_id, task); + } + } + Schedule::Recurring { .. } => {} + } + } + + /// Removes the task of the provided task_id and all scheduled tasks, including those in the task queue. + fn remove_task(task_id: TaskIdV2, task: TaskOf) { + let mut found_task: bool = false; + let mut execution_times = task.execution_times(); + Self::clean_execution_times_vector(&mut execution_times); + let current_time_slot = Self::get_current_time_slot().unwrap_or(0); + + if let Some((last_time_slot, _)) = Self::get_last_slot() { + for execution_time in execution_times.iter().rev() { + // Execution time is less than current time slot and in the past. No more execution times need to be removed. + if *execution_time < current_time_slot { + break; + } + // Execution time is equal to last time slot and task queue should be checked for task id. + // After checking task queue no other execution times need to be removed. + if *execution_time == last_time_slot { + let mut task_queue = Self::get_task_queue(); + for i in 0..task_queue.len() { + if task_queue[i].1 == task_id { + task_queue.remove(i); + TaskQueueV2::::put(task_queue); + found_task = true; + break; + } + } + break; + } + // Execution time is greater than current time slot and in the future. Remove task id from scheduled tasks. 
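+                    // When other tasks remain in the slot, the slot entry is rewritten
+                    // with this task's execution weight subtracted from the slot's
+                    // tracked weight; a slot that would be left empty is removed outright.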
+ if let Some(ScheduledTasksOf:: { + tasks: mut account_task_ids, + weight, + }) = Self::get_scheduled_tasks(*execution_time) + { + for i in 0..account_task_ids.len() { + if account_task_ids[i].1 == task_id { + if account_task_ids.len() == 1 { + ScheduledTasksV3::::remove(*execution_time); + } else { + account_task_ids.remove(i); + ScheduledTasksV3::::insert( + *execution_time, + ScheduledTasksOf:: { + tasks: account_task_ids, + weight: weight.saturating_sub( + task.action.execution_weight::().unwrap_or(0) + as u128, + ), + }, + ); + } + found_task = true; + break; + } + } + } + } + } else { + // If last time slot does not exist then check each time in scheduled tasks and remove if exists. + for execution_time in execution_times.iter().rev() { + if let Some(ScheduledTasksOf:: { + tasks: mut account_task_ids, + weight, + }) = Self::get_scheduled_tasks(*execution_time) + { + for i in 0..account_task_ids.len() { + if account_task_ids[i].1 == task_id { + if account_task_ids.len() == 1 { + ScheduledTasksV3::::remove(*execution_time); + } else { + account_task_ids.remove(i); + ScheduledTasksV3::::insert( + *execution_time, + ScheduledTasksOf:: { + tasks: account_task_ids, + weight: weight.saturating_sub( + task.action.execution_weight::().unwrap_or(0) + as u128, + ), + }, + ); + } + found_task = true; + break; + } + } + } + } + } + + if !found_task { + Self::deposit_event(Event::TaskNotFound { + who: task.owner_id.clone(), + task_id: task_id.clone(), + }); + } + + // TODO: Add refund reserved execution fees here + + AccountTasks::::remove(task.owner_id.clone(), task_id.clone()); + + Self::deposit_event(Event::TaskCancelled { + who: task.owner_id, + task_id, + }); + } + + /// Schedule task and return it's task_id. + pub fn schedule_task(task: &TaskOf) -> Result> { + let owner_id = task.owner_id.clone(); + + let execution_times = task.execution_times(); + + if AccountTasks::::contains_key(&owner_id, task.task_id.clone()) { + Err(Error::::DuplicateTask)?; + } + + // If 'dev-queue' feature flag and execution_times equals [0], allows for putting a task directly on the task queue + #[cfg(feature = "dev-queue")] + if execution_times == vec![0] { + let mut task_queue = Self::get_task_queue(); + task_queue.push((owner_id, task.task_id.clone())); + TaskQueueV2::::put(task_queue); + + return Ok(task.task_id.clone()); + } + + Self::insert_scheduled_tasks(task, execution_times) + } + + /// Insert the account/task id into scheduled tasks + /// With transaction will protect against a partial success where N of M execution times might be full, + /// rolling back any successful insertions into the schedule task table. + fn insert_scheduled_tasks( + task: &TaskOf, + execution_times: Vec, + ) -> Result> { + let task_id = task.task_id.clone(); + + with_transaction( + || -> storage::TransactionOutcome> { + for time in execution_times.iter() { + let mut scheduled_tasks = + Self::get_scheduled_tasks(*time).unwrap_or_default(); + if scheduled_tasks + .try_push::(task_id.clone(), task) + .is_err() + { + return Rollback(Err(DispatchError::Other("time slot full"))); + } + >::insert(*time, scheduled_tasks); + } + + Commit(Ok(task_id)) + }, + ) + .map_err(|_| Error::::TimeSlotFull) + } + + /// Validate and schedule task. + /// This will also charge the execution fee. + fn validate_and_schedule_task( + action: ActionOf, + owner_id: AccountOf, + schedule: Schedule, + abort_errors: Vec>, + ) -> Result { + if let Action::XCMP { execution_fee, .. 
} = action.clone() { + let asset_location = Location::try_from(execution_fee.asset_location) + .map_err(|()| Error::::BadVersion)?; + let _asset_location = asset_location + .reanchored(&T::SelfLocation::get(), &T::UniversalLocation::get()) + .map_err(|_| Error::::CannotReanchor)?; + }; + + let executions = schedule.known_executions_left(); + + let task = TaskOf::::new( + owner_id.clone(), + Self::generate_task_idv2(), + schedule, + action.clone(), + abort_errors, + ); + + let task_id = + T::FeeHandler::pay_checked_fees_for(&owner_id, &action, executions, || { + let task_id = Self::schedule_task(&task)?; + AccountTasks::::insert(owner_id.clone(), task_id.clone(), task); + Ok(task_id) + })?; + + Ok(task_id) + } + + /// Schedule a task with TaskScheduled event. + pub fn schedule_task_with_event( + action: ActionOf, + owner_id: AccountOf, + schedule: Schedule, + abort_errors: Vec>, + encoded_call: Option>, + ) -> DispatchResult { + // Schedule the task. + let task_id: TaskIdV2 = Self::validate_and_schedule_task( + action.clone(), + owner_id.clone(), + schedule, + abort_errors, + )?; + + let schedule_as = match action { + Action::XCMP { schedule_as, .. } => schedule_as, + _ => None, + }; + + // Deposit the event. + Self::deposit_event(Event::::TaskScheduled { + who: owner_id, + task_id, + schedule_as, + encoded_call, + }); + + Ok(()) + } + + fn reschedule_or_remove_task(mut task: TaskOf, dispatch_error: Option) { + let task_id = task.task_id.clone(); + // When the error can be found in the abort_errors list, the next task execution will not be scheduled. + // Otherwise, continue to schedule next execution. + match dispatch_error { + Some(err) + if err == DispatchError::from(Error::::CallCannotBeDecoded) + || task + .abort_errors + .contains(&Into::<&str>::into(err).as_bytes().to_vec()) => + { + Self::deposit_event(Event::::TaskNotRescheduled { + who: task.owner_id.clone(), + task_id: task_id.clone(), + error: err, + }); + AccountTasks::::remove(task.owner_id.clone(), task_id); + } + _ => { + let owner_id = task.owner_id.clone(); + let action = task.action.clone(); + match Self::reschedule_existing_task(&mut task) { + Ok(_) => { + let schedule_as = match action { + Action::XCMP { schedule_as, .. } => schedule_as, + _ => None, + }; + Self::deposit_event(Event::::TaskRescheduled { + who: owner_id.clone(), + task_id: task_id.clone(), + schedule_as, + }); + AccountTasks::::insert(owner_id, task_id, task.clone()); + } + Err(err) => { + Self::deposit_event(Event::::TaskRescheduleFailed { + who: task.owner_id.clone(), + task_id: task_id.clone(), + error: err, + }); + AccountTasks::::remove(task.owner_id.clone(), task_id); + } + }; + } + } + } + + fn reschedule_existing_task(task: &mut TaskOf) -> DispatchResult { + let task_id = task.task_id.clone(); + + match task.schedule { + Schedule::Recurring { + ref mut next_execution_time, + frequency, + } => { + let new_execution_time = next_execution_time + .checked_add(frequency) + .ok_or(Error::::InvalidTime)?; + *next_execution_time = new_execution_time; + + // TODO: should execution fee depend on whether task is recurring? + T::FeeHandler::pay_checked_fees_for(&task.owner_id, &task.action, 1, || { + Self::insert_scheduled_tasks(task, vec![new_execution_time]) + .map_err(|e| e.into()) + })?; + + let owner_id = task.owner_id.clone(); + AccountTasks::::insert(owner_id, task_id, task.clone()); + } + Schedule::Fixed { .. 
} => {} + } + Ok(()) + } + + fn handle_task_post_processing( + task_id: TaskIdV2, + task: TaskOf, + error: Option, + ) { + match task.schedule { + Schedule::Fixed { .. } => { + Self::decrement_task_and_remove_if_complete(task_id, task) + } + Schedule::Recurring { .. } => Self::reschedule_or_remove_task(task, error), + } + } + + pub fn generate_task_idv2() -> TaskIdV2 { + let current_block_number = + TryInto::::try_into(>::block_number()) + .ok() + .unwrap_or(0); + + let tx_id = >::extrinsic_index().unwrap_or(0); + + let evt_index = >::event_count(); + + format!("{:}-{:}-{:}", current_block_number, tx_id, evt_index) + .as_bytes() + .to_vec() + } + + /// Calculates the execution fee for a given action based on weight and num of executions + /// + /// Fee saturates at Weight/BalanceOf when there are an unreasonable num of executions + /// In practice, executions is bounded by T::MaxExecutionTimes and unlikely to saturate + pub fn calculate_schedule_fee_amount( + action: &ActionOf, + executions: u32, + ) -> Result, DispatchError> { + let total_weight = action + .execution_weight::()? + .saturating_mul(executions.into()); + + let schedule_fee_location = action.schedule_fee_location::(); + let schedule_fee_location = schedule_fee_location + .reanchored(&T::SelfLocation::get(), &T::UniversalLocation::get()) + .map_err(|_| Error::::CannotReanchor)?; + + let fee = if schedule_fee_location == Location::default() { + T::ExecutionWeightFee::get() + .saturating_mul(>::saturated_from(total_weight)) + } else { + let raw_fee = + T::FeeConversionRateProvider::get_fee_per_second(&schedule_fee_location) + .ok_or("CouldNotDetermineFeePerSecond")? + .checked_mul(total_weight as u128) + .ok_or("FeeOverflow") + .map(|raw_fee| raw_fee / (WEIGHT_REF_TIME_PER_SECOND as u128))?; + >::saturated_from(raw_fee) + }; + + Ok(fee) + } + + /// Checks if the execution fee location is supported for scheduling a task + /// + /// if the locations can not be verified, an error such as InvalidAssetLocation or UnsupportedFeePayment will be thrown + pub fn ensure_supported_execution_fee_location( + exeuction_fee_location: &Location, + destination: &Location, + ) -> Result<(), DispatchError> { + let exeuction_fee = Asset { + id: exeuction_fee_location.clone().into(), + fun: Fungibility::Fungible(0), + }; + let reserve = T::ReserveProvider::reserve(&exeuction_fee) + .ok_or(Error::::InvalidAssetLocation)?; + if reserve != Location::here() && &reserve != destination { + return Err(Error::::UnsupportedFeePayment.into()); + } + + Ok(()) + } + } +} diff --git a/pallets/automation-time/src/mock.rs b/pallets/automation-time/src/mock.rs new file mode 100644 index 000000000..99dfcb014 --- /dev/null +++ b/pallets/automation-time/src/mock.rs @@ -0,0 +1,665 @@ +// This file is part of Ava Protocol. + +// Copyright (C) 2022 Ava Protocol +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
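+
+// The helpers below assemble a minimal `Test` runtime for unit tests. As a
+// rough usage sketch (`START_BLOCK_TIME` and `SCHEDULED_TIME` are constants
+// defined in tests.rs; the weight value here is arbitrary):
+//
+//     new_test_ext(START_BLOCK_TIME).execute_with(|| {
+//         let _task_id = schedule_task(ALICE, vec![SCHEDULED_TIME], vec![2, 4]);
+//         Timestamp::set_timestamp(SCHEDULED_TIME * 1_000);
+//         AutomationTime::trigger_tasks(Weight::from_parts(200_000_000_000, 0));
+//     });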
+ +use super::*; +use crate as pallet_automation_time; +use crate::TaskIdV2; + +use ava_protocol_primitives::{AbsoluteAndRelativeReserveProvider, EnsureProxy}; +use frame_support::{ + assert_ok, construct_runtime, parameter_types, + traits::{ConstU32, Everything}, + weights::Weight, + PalletId, +}; +use frame_system::{self as system, RawOrigin}; +use orml_traits::parameter_type_with_key; +use sp_core::H256; +use sp_runtime::{ + traits::{AccountIdConversion, BlakeTwo256, Convert, IdentityLookup}, + AccountId32, BuildStorage, DispatchError, Perbill, +}; +use sp_std::{marker::PhantomData, vec::Vec}; +use staging_xcm::latest::{prelude::*, Junctions::*}; + +type Block = system::mocking::MockBlock; + +pub type Balance = u128; +pub type AccountId = AccountId32; +pub type CurrencyId = u32; + +pub const ALICE: [u8; 32] = [1u8; 32]; +pub const BOB: [u8; 32] = [2u8; 32]; +pub const DELEGATOR_ACCOUNT: [u8; 32] = [3u8; 32]; +pub const PROXY_ACCOUNT: [u8; 32] = [4u8; 32]; + +pub const PARA_ID: u32 = 2000; +pub const NATIVE: CurrencyId = 0; +pub const NATIVE_LOCATION: Location = Location { + parents: 0, + interior: Here, +}; +pub const NATIVE_EXECUTION_WEIGHT_FEE: u128 = 12; +pub const FOREIGN_CURRENCY_ID: CurrencyId = 1; + +#[derive(Clone)] +pub struct MockAssetFeePerSecond { + pub asset_location: Location, + pub fee_per_second: u128, +} + +pub fn get_moonbase_asset_location() -> Location { + Location { + parents: 1, + interior: X2([Parachain(1000u32), PalletInstance(3u8)].into()), + } +} + +pub fn get_asset_fee_per_second_config() -> Vec { + let asset_fee_per_second: [MockAssetFeePerSecond; 3] = [ + MockAssetFeePerSecond { + asset_location: Location { + parents: 1, + interior: Parachain(2000).into(), + }, + fee_per_second: 416_000_000_000, + }, + MockAssetFeePerSecond { + asset_location: Location { + parents: 1, + interior: X2([ + Parachain(2110), + GeneralKey { + length: 4, + data: [0; 32], + }, + ] + .into()), + }, + fee_per_second: 416_000_000_000, + }, + MockAssetFeePerSecond { + asset_location: get_moonbase_asset_location(), + fee_per_second: 10_000_000_000_000_000_000, + }, + ]; + asset_fee_per_second.to_vec() +} + +construct_runtime!( + pub enum Test + { + System: system, + Timestamp: pallet_timestamp, + Balances: pallet_balances, + ParachainInfo: parachain_info, + Tokens: orml_tokens, + Currencies: orml_currencies, + AutomationTime: pallet_automation_time, + } +); + +parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const SS58Prefix: u8 = 51; +} + +impl system::Config for Test { + type BaseCallFilter = Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Nonce = u64; + type Block = Block; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId32; + type Lookup = IdentityLookup; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = SS58Prefix; + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; + type RuntimeTask = (); + type SingleBlockMigrations = (); + type MultiBlockMigrator = (); + type PreInherents = (); + type PostInherents = (); + type PostTransactions = (); +} + +parameter_types! 
{ + pub const ExistentialDeposit: u64 = 1; + pub const MaxLocks: u32 = 50; + pub const MaxReserves: u32 = 50; +} + +impl pallet_balances::Config for Test { + type MaxLocks = MaxLocks; + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; + type FreezeIdentifier = (); + type MaxFreezes = ConstU32<0>; + type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); + type WeightInfo = (); +} + +impl parachain_info::Config for Test {} + +parameter_type_with_key! { + pub ExistentialDeposits: |_currency_id: CurrencyId| -> Balance { + Default::default() + }; +} +parameter_types! { + pub DustAccount: AccountId = PalletId(*b"auto/dst").into_account_truncating(); +} + +impl orml_tokens::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Balance = Balance; + type Amount = i64; + type CurrencyId = CurrencyId; + type WeightInfo = (); + type ExistentialDeposits = ExistentialDeposits; + type CurrencyHooks = (); + type MaxLocks = ConstU32<100_000>; + type MaxReserves = ConstU32<100_000>; + type ReserveIdentifier = [u8; 8]; + type DustRemovalWhitelist = frame_support::traits::Nothing; +} + +impl orml_currencies::Config for Test { + type MultiCurrency = Tokens; + type NativeCurrency = AdaptedBasicCurrency; + type GetNativeCurrencyId = GetNativeCurrencyId; + type WeightInfo = (); +} +pub type AdaptedBasicCurrency = orml_currencies::BasicCurrencyAdapter; + +parameter_types! { + pub const MinimumPeriod: u64 = 1000; +} + +impl pallet_timestamp::Config for Test { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); +} + +pub struct MockWeight(PhantomData); +impl pallet_automation_time::WeightInfo for MockWeight { + fn schedule_auto_compound_delegated_stake_task_full() -> Weight { + Weight::zero() + } + fn schedule_dynamic_dispatch_task(_v: u32) -> Weight { + Weight::zero() + } + fn schedule_dynamic_dispatch_task_full(_v: u32) -> Weight { + Weight::zero() + } + fn schedule_xcmp_task_full(_v: u32) -> Weight { + Weight::zero() + } + fn cancel_scheduled_task_full() -> Weight { + Weight::zero() + } + fn force_cancel_scheduled_task() -> Weight { + Weight::zero() + } + fn force_cancel_scheduled_task_full() -> Weight { + Weight::zero() + } + fn cancel_task_with_schedule_as_full() -> Weight { + Weight::zero() + } + fn run_xcmp_task() -> Weight { + Weight::from_parts(20_000, 0) + } + fn run_auto_compound_delegated_stake_task() -> Weight { + Weight::from_parts(20_000, 0) + } + fn run_dynamic_dispatch_action() -> Weight { + Weight::from_parts(20_000, 0) + } + fn run_dynamic_dispatch_action_fail_decode() -> Weight { + Weight::from_parts(20_000, 0) + } + fn run_missed_tasks_many_found(v: u32) -> Weight { + Weight::from_parts(10_000 * v as u64, 0u64) + } + fn run_missed_tasks_many_missing(v: u32) -> Weight { + Weight::from_parts(10_000 * v as u64, 0u64) + } + fn run_tasks_many_found(v: u32) -> Weight { + Weight::from_parts(50_000 * v as u64, 0u64) + } + fn run_tasks_many_missing(v: u32) -> Weight { + Weight::from_parts(10_000 * v as u64, 0u64) + } + fn update_task_queue_overhead() -> Weight { + Weight::from_parts(10_000, 0) + } + fn append_to_missed_tasks(v: u32) -> Weight { + Weight::from_parts(20_000 * v as u64, 0u64) + } + fn update_scheduled_task_queue() -> Weight { + Weight::from_parts(20_000, 0u64) + } + fn shift_missed_tasks() -> Weight { + Weight::from_parts(900_000, 0u64) + 
} +} + +pub struct MockXcmpTransactor(PhantomData<(T, C)>); +impl pallet_xcmp_handler::XcmpTransactor + for MockXcmpTransactor +where + T: Config + pallet::Config, + C: frame_support::traits::ReservableCurrency, +{ + fn transact_xcm( + _destination: Location, + _location: Location, + _fee: u128, + _caller: T::AccountId, + _transact_encoded_call: sp_std::vec::Vec, + _transact_encoded_call_weight: Weight, + _overall_weight: Weight, + _flow: InstructionSequence, + ) -> Result<(), sp_runtime::DispatchError> { + Ok(()) + } + + fn pay_xcm_fee( + _: CurrencyId, + _: T::AccountId, + _: u128, + ) -> Result<(), sp_runtime::DispatchError> { + Ok(()) + } +} + +pub struct ScheduleAllowList; +impl Contains for ScheduleAllowList { + fn contains(c: &RuntimeCall) -> bool { + matches!(c, RuntimeCall::System(_) | RuntimeCall::Balances(_)) + } +} + +pub struct MockConversionRateProvider; +impl FixedConversionRateProvider for MockConversionRateProvider { + fn get_fee_per_second(location: &Location) -> Option { + get_fee_per_second(location) + } +} + +pub struct MockTokenIdConvert; +impl Convert> for MockTokenIdConvert { + fn convert(id: CurrencyId) -> Option { + if id == NATIVE { + Some(Location::new(0, Here)) + } else if id == FOREIGN_CURRENCY_ID { + Some(Location::new(1, X1([Parachain(PARA_ID)].into()))) + } else { + None + } + } +} + +impl Convert> for MockTokenIdConvert { + fn convert(location: Location) -> Option { + if location == Location::new(0, Here) { + Some(NATIVE) + } else if location == Location::new(1, X1([Parachain(PARA_ID)].into())) { + Some(FOREIGN_CURRENCY_ID) + } else { + None + } + } +} + +pub struct MockEnsureProxy; +impl EnsureProxy for MockEnsureProxy { + fn ensure_ok(_delegator: AccountId, _delegatee: AccountId) -> Result<(), &'static str> { + if _delegator == DELEGATOR_ACCOUNT.into() && _delegatee == PROXY_ACCOUNT.into() { + Ok(()) + } else { + Err("proxy error: expected `ProxyType::Any`") + } + } +} + +parameter_types! { + pub const MaxTasksPerSlot: u32 = 2; + #[derive(Debug)] + pub const MaxExecutionTimes: u32 = 3; + pub const MaxScheduleSeconds: u64 = 86_400; // 24 hours in seconds + pub const SlotSizeSeconds: u64 = 600; // 10 minutes in seconds; + pub const MaxBlockWeight: u64 = 24_000_000; + pub const MaxWeightPercentage: Perbill = Perbill::from_percent(40); + pub const UpdateQueueRatio: Perbill = Perbill::from_percent(50); + pub const ExecutionWeightFee: Balance = NATIVE_EXECUTION_WEIGHT_FEE; + + // When unit testing dynamic dispatch, we use the real weight value of the extrinsics call + // This is an external lib that we don't own so we try to not mock, follow the rule don't mock + // what you don't own + // One of test we do is Balances::transfer call, which has its weight define here: + // https://github.com/paritytech/polkadot-sdk/blob/polkadot-v0.9.38/frame/balances/src/weights.rs#L61-L73 + // When logging the final calculated amount, its value is 73_314_000. + // + // in our unit test, we test a few transfers with dynamic dispatch. On top + // of that, there is also weight of our call such as fetching the tasks, + // move from schedule slot to tasks queue,.. so the weight of a schedule + // transfer with dynamic dispatch is even higher. 
+ // + // and because we test run a few of them so I set it to ~10x value of 73_314_000 + pub const MaxWeightPerSlot: u128 = 700_000_000; + pub const XmpFee: u128 = 1_000_000; + pub const GetNativeCurrencyId: CurrencyId = NATIVE; + pub const RelayNetwork: NetworkId = NetworkId::Rococo; + // The universal location within the global consensus system + pub UniversalLocation: InteriorLocation = + X2([GlobalConsensus(RelayNetwork::get()), Parachain(ParachainInfo::parachain_id().into())].into()); + pub SelfLocation: Location = Location::new(1, X1([Parachain(ParachainInfo::parachain_id().into())].into())); +} + +impl pallet_automation_time::Config for Test { + type RuntimeEvent = RuntimeEvent; + type MaxTasksPerSlot = MaxTasksPerSlot; + type MaxExecutionTimes = MaxExecutionTimes; + type MaxScheduleSeconds = MaxScheduleSeconds; + type MaxBlockWeight = MaxBlockWeight; + type MaxWeightPercentage = MaxWeightPercentage; + type UpdateQueueRatio = UpdateQueueRatio; + type WeightInfo = MockWeight; + type ExecutionWeightFee = ExecutionWeightFee; + type MaxWeightPerSlot = MaxWeightPerSlot; + type SlotSizeSeconds = SlotSizeSeconds; + type Currency = Balances; + type MultiCurrency = Currencies; + type CurrencyId = CurrencyId; + type FeeHandler = FeeHandler; + type XcmpTransactor = MockXcmpTransactor; + type RuntimeCall = RuntimeCall; + type ScheduleAllowList = ScheduleAllowList; + type CurrencyIdConvert = MockTokenIdConvert; + type FeeConversionRateProvider = MockConversionRateProvider; + type EnsureProxy = MockEnsureProxy; + type UniversalLocation = UniversalLocation; + type ReserveProvider = AbsoluteAndRelativeReserveProvider; + type SelfLocation = SelfLocation; +} + +// Build genesis storage according to the mock runtime. +pub fn new_test_ext(state_block_time: u64) -> sp_io::TestExternalities { + let genesis_storage = system::GenesisConfig::::default() + .build_storage() + .unwrap(); + let mut ext = sp_io::TestExternalities::new(genesis_storage); + ext.execute_with(|| System::set_block_number(1)); + ext.execute_with(|| Timestamp::set_timestamp(state_block_time)); + ext +} + +// A function to support test scheduleing a Fixed schedule +// We don't focus on making sure the execution run properly. We just focus on +// making sure a task is scheduled into the queue +pub fn schedule_task(owner: [u8; 32], scheduled_times: Vec, message: Vec) -> TaskIdV2 { + let call: RuntimeCall = frame_system::Call::remark_with_event { remark: message }.into(); + + schedule_dynamic_dispatch_task(owner, scheduled_times, call) +} + +pub fn schedule_dynamic_dispatch_task( + owner: [u8; 32], + scheduled_times: Vec, + call: RuntimeCall, +) -> TaskIdV2 { + let account_id = AccountId32::new(owner); + + assert_ok!(fund_account_dynamic_dispatch( + &account_id, + scheduled_times.len(), + call.encode() + )); + + assert_ok!(AutomationTime::schedule_dynamic_dispatch_task( + RuntimeOrigin::signed(account_id), + ScheduleParam::Fixed { + execution_times: scheduled_times + }, + Box::new(call), + )); + last_task_id() +} + +// A function to support test scheduling a Recurring schedule +// We don't focus on making sure the execution run properly. 
We just focus on +// making sure a task is scheduled into the queue +pub fn schedule_recurring_task( + owner: [u8; 32], + next_execution_time: UnixTime, + frequency: Seconds, + message: Vec, +) -> TaskIdV2 { + let account_id = AccountId32::new(owner); + let call: RuntimeCall = frame_system::Call::remark_with_event { remark: message }.into(); + + assert_ok!(fund_account_dynamic_dispatch(&account_id, 1, call.encode())); + + assert_ok!(AutomationTime::schedule_dynamic_dispatch_task( + RuntimeOrigin::signed(account_id), + ScheduleParam::Recurring { + next_execution_time, + frequency + }, + Box::new(call), + )); + last_task_id() +} + +pub fn add_task_to_task_queue( + owner: [u8; 32], + task_id: TaskIdV2, + scheduled_times: Vec, + action: ActionOf, + abort_errors: Vec>, +) -> TaskIdV2 { + let schedule = Schedule::new_fixed_schedule::(scheduled_times).unwrap(); + add_to_task_queue(owner, task_id, schedule, action, abort_errors) +} + +pub fn add_to_task_queue( + owner: [u8; 32], + task_id: TaskIdV2, + schedule: Schedule, + action: ActionOf, + abort_errors: Vec>, +) -> TaskIdV2 { + let task_id = create_task(owner, task_id, schedule, action, abort_errors); + let mut task_queue = AutomationTime::get_task_queue(); + task_queue.push((AccountId32::new(owner), task_id.clone())); + TaskQueueV2::::put(task_queue); + task_id +} + +pub fn add_task_to_missed_queue( + owner: [u8; 32], + task_id: TaskIdV2, + scheduled_times: Vec, + action: ActionOf, + abort_errors: Vec>, +) -> TaskIdV2 { + let schedule = Schedule::new_fixed_schedule::(scheduled_times.clone()).unwrap(); + let task_id = create_task(owner, task_id, schedule, action, abort_errors); + let missed_task = + MissedTaskV2Of::::new(AccountId32::new(owner), task_id.clone(), scheduled_times[0]); + let mut missed_queue = AutomationTime::get_missed_queue(); + missed_queue.push(missed_task); + MissedQueueV2::::put(missed_queue); + task_id +} + +pub fn create_task( + owner: [u8; 32], + task_id: TaskIdV2, + schedule: Schedule, + action: ActionOf, + abort_errors: Vec>, +) -> TaskIdV2 { + let task = TaskOf::::new( + owner.into(), + task_id.clone(), + schedule, + action, + abort_errors, + ); + AccountTasks::::insert(AccountId::new(owner), task_id.clone(), task); + task_id +} + +pub fn events() -> Vec { + let events = System::events(); + let evt = events.into_iter().map(|evt| evt.event).collect::>(); + + System::reset_events(); + + evt +} + +pub fn last_event() -> RuntimeEvent { + events().pop().unwrap() +} + +// A utility test function to simplify the process of getting a task id that we just scheduled in the +// test by looking at the last id and pluck it +pub fn last_task_id() -> TaskIdV2 { + get_task_ids_from_events() + .last() + .expect("Unable to find a task_id from the existing TaskScheduled events") + .clone() +} + +// A utility test function to pluck out the task id from events, useful when dealing with multiple +// task scheduling +pub fn get_task_ids_from_events() -> Vec { + System::events() + .into_iter() + .filter_map(|e| match e.event { + RuntimeEvent::AutomationTime(crate::Event::TaskScheduled { task_id, .. 
+
+// A utility test function to pluck the task ids out of events, useful when dealing with multiple
+// task schedulings.
+pub fn get_task_ids_from_events() -> Vec<TaskIdV2> {
+    System::events()
+        .into_iter()
+        .filter_map(|e| match e.event {
+            RuntimeEvent::AutomationTime(crate::Event::TaskScheduled { task_id, .. }) => {
+                Some(task_id)
+            }
+            _ => None,
+        })
+        .collect::<Vec<_>>()
+}
+
+pub fn get_funds(account: AccountId) {
+    let double_action_weight = Weight::from_parts(20_000_u64, 0u64) * 2;
+
+    let action_fee = ExecutionWeightFee::get() * u128::from(double_action_weight.ref_time());
+    let max_execution_fee = action_fee * u128::from(MaxExecutionTimes::get());
+    Balances::force_set_balance(RawOrigin::Root.into(), account, max_execution_fee).unwrap();
+}
+
+pub fn get_minimum_funds(account: AccountId, executions: u32) {
+    let double_action_weight = Weight::from_parts(20_000_u64, 0u64) * 2;
+    let action_fee = ExecutionWeightFee::get() * u128::from(double_action_weight.ref_time());
+    let max_execution_fee = action_fee * u128::from(executions);
+    Balances::force_set_balance(RawOrigin::Root.into(), account, max_execution_fee).unwrap();
+}
+
+pub fn get_xcmp_funds(account: AccountId) {
+    let double_action_weight = MockWeight::<Test>::run_xcmp_task() * 2;
+    let action_fee = ExecutionWeightFee::get() * u128::from(double_action_weight.ref_time());
+    let max_execution_fee = action_fee * u128::from(MaxExecutionTimes::get());
+    let with_xcm_fees = max_execution_fee + XmpFee::get();
+    Balances::force_set_balance(RawOrigin::Root.into(), account, with_xcm_fees).unwrap();
+}
+
+pub fn get_multi_xcmp_funds(account: AccountId) {
+    let double_action_weight = MockWeight::<Test>::run_xcmp_task() * 2;
+    let action_fee = ExecutionWeightFee::get() * u128::from(double_action_weight.ref_time());
+    let max_execution_fee = action_fee * u128::from(MaxExecutionTimes::get());
+    Balances::force_set_balance(RawOrigin::Root.into(), account.clone(), max_execution_fee)
+        .unwrap();
+    Currencies::update_balance(
+        RawOrigin::Root.into(),
+        account,
+        FOREIGN_CURRENCY_ID,
+        XmpFee::get() as i64,
+    )
+    .unwrap();
+}
+
+// TODO: swap the helpers above to this pattern
+pub fn fund_account_dynamic_dispatch(
+    account: &AccountId,
+    execution_count: usize,
+    encoded_call: Vec<u8>,
+) -> Result<(), DispatchError> {
+    let action: ActionOf<Test> = Action::DynamicDispatch { encoded_call };
+    let action_weight = action.execution_weight::<Test>()?;
+    fund_account(account, action_weight, execution_count, None);
+    Ok(())
+}
+
+pub fn fund_account(
+    account: &AccountId,
+    action_weight: u64,
+    execution_count: usize,
+    additional_amount: Option<u128>,
+) {
+    let amount: u128 =
+        u128::from(action_weight) * ExecutionWeightFee::get() * execution_count as u128
+            + additional_amount.unwrap_or(0)
+            + u128::from(ExistentialDeposit::get());
+    _ = <Test as Config>::Currency::deposit_creating(account, amount);
+}
+
+pub fn get_fee_per_second(location: &Location) -> Option<u128> {
+    let location = location
+        .clone()
+        .reanchored(
+            &SelfLocation::get(),
+            &<Test as Config>::UniversalLocation::get(),
+        )
+        .expect("Reanchor location failed");
+
+    let found_asset = get_asset_fee_per_second_config().into_iter().find(|item| {
+        let MockAssetFeePerSecond { asset_location, .. } = item;
+        asset_location == &location
+    });
+
+    found_asset.map(|asset| asset.fee_per_second)
+}
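+
+// A small mirror of `fund_account`'s arithmetic (a sketch; the constants come
+// from this mock's `parameter_types!`), handy when a test wants to assert on
+// the exact balance that was deposited.
+#[allow(dead_code)]
+pub fn expected_funding_amount(action_weight: u64, execution_count: usize, extra: u128) -> u128 {
+    u128::from(action_weight) * ExecutionWeightFee::get() * execution_count as u128
+        + extra
+        + u128::from(ExistentialDeposit::get())
+}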
diff --git a/pallets/automation-time/src/tests.rs b/pallets/automation-time/src/tests.rs
new file mode 100644
index 000000000..90cc04056
--- /dev/null
+++ b/pallets/automation-time/src/tests.rs
@@ -0,0 +1,3572 @@
+// This file is part of Ava Protocol.
+
+// Copyright (C) 2022 Ava Protocol
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::{
+    mock::*, AccountTasks, Action, ActionOf, AssetPayment, Config, Error, InstructionSequence,
+    LastTimeSlot, MissedTaskV2Of, ScheduleParam, ScheduledTasksOf, TaskOf, TaskQueueV2, WeightInfo,
+};
+
+use frame_support::{
+    assert_noop, assert_ok,
+    dispatch::GetDispatchInfo,
+    pallet_prelude::DispatchError,
+    traits::OnInitialize,
+    weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight},
+};
+use frame_system::{self, RawOrigin};
+use parity_scale_codec::Encode;
+use rand::Rng;
+use sp_runtime::{
+    traits::{BlakeTwo256, Hash},
+    AccountId32,
+    TokenError::FundsUnavailable,
+};
+use sp_std::collections::btree_map::BTreeMap;
+use staging_xcm::{
+    latest::{prelude::*, Junction::Parachain, Junctions::*},
+    VersionedLocation,
+};
+
+pub const START_BLOCK_TIME: u64 = 33198768000 * 1_000;
+const MAX_SCHEDULE_SECONDS: u64 = <Test as Config>::MaxScheduleSeconds::get();
+pub const SLOT_SIZE_SECONDS: u64 = <Test as Config>::SlotSizeSeconds::get();
+pub const SCHEDULED_TIME: u64 = START_BLOCK_TIME / 1_000 + SLOT_SIZE_SECONDS * 2;
+const LAST_BLOCK_TIME: u64 = START_BLOCK_TIME / 1_000;
+
+// This is 1-0-3: {1: block idx}-{0: first extrinsic in block}-{3: the event index}
+const FIRST_TASK_ID: [u8; 5] = [49, 45, 48, 45, 51];
+const SECOND_TASK_ID: [u8; 5] = [49, 45, 48, 45, 54];
+
+const EXPECT_CALCULATE_SCHEDULE_FEE_AMOUNT: &str = "Calculate schedule fee amount should work";
+
+struct XcmpActionParams {
+    destination: Location,
+    schedule_fee: Location,
+    execution_fee: AssetPayment,
+    encoded_call: Vec<u8>,
+    encoded_call_weight: Weight,
+    overall_weight: Weight,
+    schedule_as: Option<AccountId32>,
+    instruction_sequence: InstructionSequence,
+}
+
+impl Default for XcmpActionParams {
+    fn default() -> Self {
+        let delegator_account = AccountId32::new(DELEGATOR_ACCOUNT);
+        XcmpActionParams {
+            destination: Location::new(1, X1([Parachain(PARA_ID)].into())),
+            schedule_fee: get_moonbase_asset_location(),
+            execution_fee: AssetPayment {
+                asset_location: get_moonbase_asset_location().into(),
+                amount: 100,
+            },
+            encoded_call: vec![3, 4, 5],
+            encoded_call_weight: Weight::from_parts(100_000, 0),
+            overall_weight: Weight::from_parts(200_000, 0),
+            schedule_as: Some(delegator_account),
+            instruction_sequence: InstructionSequence::PayThroughRemoteDerivativeAccount,
+        }
+    }
+}
+
+fn create_xcmp_action(options: XcmpActionParams) -> ActionOf<Test> {
+    Action::XCMP {
+        destination: options.destination,
+        schedule_fee: options.schedule_fee,
+        execution_fee: Box::new(options.execution_fee),
+        encoded_call: options.encoded_call,
+        encoded_call_weight: options.encoded_call_weight,
+        overall_weight: options.overall_weight,
+        schedule_as: options.schedule_as,
+        instruction_sequence: options.instruction_sequence,
+    }
+}
+
+fn create_dynamic_dispatch_remark_action(remark: Vec<u8>) -> ActionOf<Test> {
+    let call: <Test as frame_system::Config>::RuntimeCall =
+        frame_system::Call::remark_with_event { remark }.into();
+    Action::DynamicDispatch {
+        encoded_call: call.encode(),
+    }
+}
+
+fn generate_random_num(min: u32, max: u32) -> u32 {
+    rand::thread_rng().gen_range(min..max)
+}
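+
+// Worked example of the XCMP schedule-fee formula exercised below (all numbers
+// are illustrative, not taken from this mock): with fee_per_second =
+// 10_000_000_000, a run_xcmp_task weight of 200_000_000_000 ref-time (0.2s),
+// and 3 executions, the fee is
+//     10_000_000_000 * 200_000_000_000 * 3 / WEIGHT_REF_TIME_PER_SECOND
+//   = 10_000_000_000 * 0.2 * 3 = 6_000_000_000.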
+
+fn calculate_local_action_schedule_fee(weight: Weight, num_of_execution: u32) -> u128 {
+    NATIVE_EXECUTION_WEIGHT_FEE * (weight.ref_time() as u128) * (num_of_execution as u128)
+}
+
+fn calculate_expected_xcmp_action_schedule_fee(
+    schedule_fee_location: Location,
+    num_of_execution: u32,
+) -> u128 {
+    let schedule_fee_location = schedule_fee_location
+        .reanchored(
+            &SelfLocation::get(),
+            &<Test as Config>::UniversalLocation::get(),
+        )
+        .expect("Location reanchor failed");
+    let weight = <Test as Config>::WeightInfo::run_xcmp_task();
+
+    if schedule_fee_location == Location::default() {
+        calculate_local_action_schedule_fee(weight, num_of_execution)
+    } else {
+        let fee_per_second =
+            get_fee_per_second(&schedule_fee_location).expect("Get fee per second should work");
+        fee_per_second * (weight.ref_time() as u128) * (num_of_execution as u128)
+            / (WEIGHT_REF_TIME_PER_SECOND as u128)
+    }
+}
+
+// Helper function to make asserting events easier.
+/// Assert the given `event` exists.
+#[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
+pub fn assert_has_event(event: RuntimeEvent) {
+    let evts = System::events()
+        .into_iter()
+        .map(|evt| evt.event)
+        .collect::<Vec<_>>();
+    assert!(evts.iter().any(|record| record == &event))
+}
+
+#[allow(dead_code)]
+#[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
+pub fn assert_last_event(event: RuntimeEvent) {
+    assert_eq!(events().last().expect("events expected"), &event);
+}
+
+/// Check that events appear in the emitted_events list in order.
+fn contains_events(emitted_events: Vec<RuntimeEvent>, events: Vec<RuntimeEvent>) -> bool {
+    // If the target events list is empty, consider it satisfied as there are no specific order requirements
+    if events.is_empty() {
+        return true;
+    }
+
+    // Convert both lists to iterators
+    let mut emitted_iter = emitted_events.iter();
+    let events_iter = events.iter();
+
+    // Iterate through the target events
+    for target_event in events_iter {
+        // Initialize a boolean variable to track whether the target event is found
+        let mut found = false;
+
+        // Continue iterating through the emitted events until a match is found or there are no more emitted events
+        for emitted_event in emitted_iter.by_ref() {
+            // Compare event type and event data for a match
+            if emitted_event == target_event {
+                // Target event found, mark as found and advance the emitted iterator
+                found = true;
+                break;
+            }
+        }
+
+        // If the target event is not found, return false
+        if !found {
+            return false;
+        }
+    }
+
+    // If all target events are found in order, return true
+    true
+}
+
+// When scheduling with a Fixed-time schedule and passing an epoch that isn't the
+// beginning of an hour, raise an error;
+// the smallest granularity unit we allow is the hour.
+#[test]
+fn schedule_invalid_time_fixed_schedule() {
+    new_test_ext(START_BLOCK_TIME).execute_with(|| {
+        // prepare data
+        let call: RuntimeCall = frame_system::Call::remark { remark: vec![12] }.into();
+
+        assert_noop!(
+            AutomationTime::schedule_dynamic_dispatch_task(
+                RuntimeOrigin::signed(AccountId32::new(ALICE)),
+                // Simulate an epoch of 1 extra second past the beginning of this hour
+                ScheduleParam::Fixed {
+                    execution_times: vec![SCHEDULED_TIME + 1]
+                },
+                Box::new(call)
+            ),
+            Error::<Test>::InvalidTime,
+        );
+    })
+}
+
+// The schedule time is the beginning of an hour epoch. We arrange our tasks
+// into hour slots and don't support scheduling jobs at a granularity smaller
+// than an hour.
+// Verify that we're throwing InvalidTime error when caller doing so +#[test] +fn schedule_invalid_time_recurring_schedule() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + for (next_run, frequency) in [ + (SCHEDULED_TIME + 10, 10_u64), + (SCHEDULED_TIME + SLOT_SIZE_SECONDS, 100_u64), + (SCHEDULED_TIME + 10, SLOT_SIZE_SECONDS), + ] + .iter() + { + // prepare data + let call: RuntimeCall = frame_system::Call::remark { remark: vec![12] }.into(); + assert_noop!( + AutomationTime::schedule_dynamic_dispatch_task( + RuntimeOrigin::signed(AccountId32::new(ALICE)), + ScheduleParam::Recurring { + next_execution_time: *next_run, + frequency: *frequency + }, + Box::new(call) + ), + Error::::InvalidTime, + ); + } + }) +} + +// when schedule task using Fixed Time Scheduled, if any of the time is in the +// past an error is return and the tasks won't be scheduled +#[test] +fn schedule_past_time() { + new_test_ext(START_BLOCK_TIME + 1_000 * SLOT_SIZE_SECONDS * 3).execute_with(|| { + assert_noop!( + AutomationTime::schedule_dynamic_dispatch_task( + RuntimeOrigin::signed(AccountId32::new(ALICE)), + ScheduleParam::Fixed { + execution_times: vec![SCHEDULED_TIME] + }, + Box::new(frame_system::Call::remark { remark: vec![12] }.into()) + ), + Error::::PastTime, + ); + + assert_noop!( + AutomationTime::schedule_dynamic_dispatch_task( + RuntimeOrigin::signed(AccountId32::new(ALICE)), + ScheduleParam::Fixed { + execution_times: vec![SCHEDULED_TIME - SLOT_SIZE_SECONDS] + }, + Box::new(frame_system::Call::remark { remark: vec![12] }.into()) + ), + Error::::PastTime, + ); + }) +} + +// when schedule task using Recurring Scheduled, if starting time is in the past, +// an error is return and the tasks won't be scheduled +#[test] +fn schedule_past_time_recurring() { + new_test_ext(START_BLOCK_TIME + 1_000 * SLOT_SIZE_SECONDS * 3).execute_with(|| { + for (next_run, frequency) in [ + (SCHEDULED_TIME - SLOT_SIZE_SECONDS, SLOT_SIZE_SECONDS * 2), + (SCHEDULED_TIME, SLOT_SIZE_SECONDS * 2), + ] + .iter() + { + // prepare data + let call: RuntimeCall = frame_system::Call::remark { remark: vec![12] }.into(); + assert_noop!( + AutomationTime::schedule_dynamic_dispatch_task( + RuntimeOrigin::signed(AccountId32::new(ALICE)), + ScheduleParam::Recurring { + next_execution_time: *next_run, + frequency: *frequency + }, + Box::new(call) + ), + Error::::PastTime, + ); + } + }) +} + +// When schedule tasks using Fixed schedule, none of execution time can be too +// far in the future. 
all element of execution_times need to fall into +// +// When schedule tasks using recurring schedule, either: +// - next_execution_time cannot too far in the future +// - next_execution_time is closed, but the frequency is too high +// +#[test] +fn schedule_too_far_out() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + for task_far_schedule in [ + // only one time slot that is far + ScheduleParam::Fixed { + execution_times: vec![SCHEDULED_TIME + MAX_SCHEDULE_SECONDS], + }, + // the first time slot is close, but the rest are too far + ScheduleParam::Fixed { + execution_times: vec![SCHEDULED_TIME, SCHEDULED_TIME + MAX_SCHEDULE_SECONDS], + }, + // the next_execution_time is too far + ScheduleParam::Recurring { + next_execution_time: SCHEDULED_TIME + MAX_SCHEDULE_SECONDS, + frequency: SLOT_SIZE_SECONDS, + }, + // the next_execution_time is closed, but frequency is too big, make it further to + // future + ScheduleParam::Recurring { + next_execution_time: SCHEDULED_TIME, + frequency: 7 * MAX_SCHEDULE_SECONDS, + }, + ] + .iter() + { + assert_noop!( + AutomationTime::schedule_dynamic_dispatch_task( + RuntimeOrigin::signed(AccountId32::new(ALICE)), + task_far_schedule.clone(), + Box::new(frame_system::Call::remark { remark: vec![12] }.into()) + ), + Error::::TimeTooFarOut, + ); + } + }) +} + +#[test] +fn schedule_not_enough_for_fees() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + assert_noop!( + AutomationTime::schedule_dynamic_dispatch_task( + RuntimeOrigin::signed(AccountId32::new(ALICE)), + ScheduleParam::Fixed { + execution_times: vec![SCHEDULED_TIME] + }, + Box::new(frame_system::Call::remark { remark: vec![12] }.into()) + ), + Error::::InsufficientBalance, + ); + }) +} + +// test schedule transfer with dynamic dispatch. +#[test] +fn schedule_transfer_with_dynamic_dispatch() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let account_id = AccountId32::new(ALICE); + let task_id = FIRST_TASK_ID.to_vec(); + + fund_account(&account_id, 900_000_000, 2, Some(0)); + + let call: ::RuntimeCall = + pallet_balances::Call::transfer_allow_death { + dest: AccountId32::new(BOB), + value: 127, + } + .into(); + + assert_ok!(AutomationTime::schedule_dynamic_dispatch_task( + RuntimeOrigin::signed(account_id.clone()), + ScheduleParam::Fixed { + execution_times: vec![SCHEDULED_TIME] + }, + Box::new(call), + )); + + Timestamp::set_timestamp(SCHEDULED_TIME * 1_000); + LastTimeSlot::::put((LAST_BLOCK_TIME, LAST_BLOCK_TIME)); + System::reset_events(); + + AutomationTime::trigger_tasks(Weight::from_parts(900_000_000, 0)); + let my_events = events(); + + let recipient = AccountId32::new(BOB); + assert_eq!(Balances::free_balance(recipient.clone()), 127); + + let mut condition: BTreeMap, Vec> = BTreeMap::new(); + condition.insert("type".as_bytes().to_vec(), "time".as_bytes().to_vec()); + condition.insert( + "timestamp".as_bytes().to_vec(), + SCHEDULED_TIME.to_string().into_bytes(), + ); + + assert!(contains_events( + my_events, + vec![ + RuntimeEvent::AutomationTime(crate::Event::TaskTriggered { + who: account_id.clone(), + task_id: task_id.clone(), + condition, + }), + RuntimeEvent::Balances(pallet_balances::pallet::Event::Transfer { + from: account_id.clone(), + to: recipient, + amount: 127, + }), + RuntimeEvent::AutomationTime(crate::Event::TaskExecuted { + who: account_id.clone(), + task_id: task_id.clone(), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { + who: account_id, + task_id, + }), + ] + )); + }) +} + +// The TaskCompleted event is emitted only when the task is 
successfully completed. +#[test] +fn will_emit_task_completed_event_when_task_completed() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let account_id = AccountId32::new(ALICE); + let _task_id = FIRST_TASK_ID.to_vec(); + + fund_account(&account_id, 900_000_000, 2, Some(0)); + + let call: ::RuntimeCall = + frame_system::Call::remark_with_event { remark: vec![0] }.into(); + + // Schedule a task to be executed at SCHEDULED_TIME and SCHEDULED_TIME + frequency. + let next_execution_time = SCHEDULED_TIME + SLOT_SIZE_SECONDS; + assert_ok!(AutomationTime::schedule_dynamic_dispatch_task( + RuntimeOrigin::signed(account_id), + ScheduleParam::Fixed { + execution_times: vec![SCHEDULED_TIME, next_execution_time] + }, + Box::new(call), + )); + + Timestamp::set_timestamp(SCHEDULED_TIME * 1_000); + LastTimeSlot::::put((LAST_BLOCK_TIME, LAST_BLOCK_TIME)); + System::reset_events(); + + // First execution + AutomationTime::trigger_tasks(Weight::from_parts(900_000_000, 0)); + let my_events = events(); + + let event = my_events.into_iter().find(|e| { + matches!( + e, + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { .. }) + ) + }); + + if event.is_some() { + panic!("TaskCompleted event should not be emitted when task is not completed"); + } + + // Second execution + Timestamp::set_timestamp(next_execution_time * 1_000); + System::reset_events(); + AutomationTime::trigger_tasks(Weight::from_parts(900_000_000, 0)); + let my_events = events(); + + my_events + .into_iter() + .find(|e| { + matches!( + e, + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { .. }) + ) + }) + .expect("TaskCompleted event should be emitted when task is completed"); + }) +} + +// The TaskCompleted event will not be emitted when the task is canceled. +#[test] +fn will_not_emit_task_completed_event_when_task_canceled() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let account_id = AccountId32::new(ALICE); + let task_id = FIRST_TASK_ID.to_vec(); + + fund_account(&account_id, 900_000_000, 2, Some(0)); + + let call: ::RuntimeCall = + frame_system::Call::remark_with_event { remark: vec![0] }.into(); + + // Schedule a task to be executed at SCHEDULED_TIME and SCHEDULED_TIME + frequency. + let next_execution_time = SCHEDULED_TIME + SLOT_SIZE_SECONDS; + assert_ok!(AutomationTime::schedule_dynamic_dispatch_task( + RuntimeOrigin::signed(account_id.clone()), + ScheduleParam::Fixed { + execution_times: vec![SCHEDULED_TIME, next_execution_time] + }, + Box::new(call), + )); + + // First execution + Timestamp::set_timestamp(SCHEDULED_TIME * 1_000); + LastTimeSlot::::put((LAST_BLOCK_TIME, LAST_BLOCK_TIME)); + System::reset_events(); + + AutomationTime::trigger_tasks(Weight::from_parts(900_000_000, 0)); + let my_events = events(); + + let event = my_events.into_iter().find(|e| { + matches!( + e, + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { .. }) + ) + }); + + if event.is_some() { + panic!("TaskCompleted event should not be emitted when task is not completed"); + } + + assert_ok!(AutomationTime::cancel_task( + RuntimeOrigin::signed(account_id), + task_id + )); + + // Second execution + Timestamp::set_timestamp(next_execution_time * 1_000); + System::reset_events(); + AutomationTime::trigger_tasks(Weight::from_parts(900_000_000, 0)); + let my_events = events(); + + // The TaskCompleted event will not be emitted when the task is canceled + let event = my_events.into_iter().find(|e| { + matches!( + e, + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { .. 
}) + ) + }); + if event.is_some() { + panic!("The TaskCompleted event will not be emitted when the task is canceled"); + } + }) +} + +// The task will be remove from account tasks when the task is canceled with schedule_as. +#[test] +fn will_remove_task_from_account_tasks_when_task_canceled_with_schedule_as() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let schedule_as = AccountId32::new(DELEGATOR_ACCOUNT); + let task_owner = AccountId32::new(PROXY_ACCOUNT); + let destination = Location::new(1, X1([Parachain(PARA_ID)].into())); + let task_id = FIRST_TASK_ID.to_vec(); + + fund_account(&task_owner, 900_000_000, 2, Some(0)); + + let call: ::RuntimeCall = + frame_system::Call::remark_with_event { remark: vec![0] }.into(); + + // Schedule task + assert_ok!(AutomationTime::schedule_xcmp_task( + RuntimeOrigin::signed(task_owner.clone()), + ScheduleParam::Fixed { execution_times: vec![SCHEDULED_TIME] }, + Box::new(destination.into()), + Box::new(NATIVE_LOCATION.into()), + Box::new(AssetPayment { + asset_location: Location::new(0, Here).into(), + amount: 10 + }), + call.encode(), + Weight::from_parts(100_000, 0), + Weight::from_parts(200_000, 0), + InstructionSequence::PayThroughRemoteDerivativeAccount, + Some(schedule_as.clone()), + )); + + // Check if the task's schedule_as is correct + let task = AccountTasks::::get(task_owner.clone(), task_id.clone()); + assert!(task.is_some()); + + let task = task.unwrap(); + assert!( + matches!(task.action, Action::XCMP { schedule_as: Some(ref s), .. } if s == &schedule_as) + ); + + // Cancel task with schedule_as + assert_ok!(AutomationTime::cancel_task_with_schedule_as( + RuntimeOrigin::signed(schedule_as), + task_owner.clone(), + task_id.clone(), + )); + + // Verify that the task is no longer in the accountTasks. + assert_eq!(AutomationTime::get_account_task(task_owner, task_id), None); + }) +} + +// Calling cancel_task_with_schedule_as with a schedule_as account will cause TaskScheduleAsNotMatch error. +#[test] +fn cancel_task_with_incorrect_schedule_as_will_fail() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let schedule_as = AccountId32::new(DELEGATOR_ACCOUNT); + let task_owner = AccountId32::new(PROXY_ACCOUNT); + let destination = Location::new(1, X1([Parachain(PARA_ID)].into())); + let task_id = FIRST_TASK_ID.to_vec(); + + fund_account(&task_owner, 900_000_000, 2, Some(0)); + + let call: ::RuntimeCall = + frame_system::Call::remark_with_event { remark: vec![0] }.into(); + + // Schedule task + assert_ok!(AutomationTime::schedule_xcmp_task( + RuntimeOrigin::signed(task_owner.clone()), + ScheduleParam::Fixed { execution_times: vec![SCHEDULED_TIME] }, + Box::new(destination.into()), + Box::new(NATIVE_LOCATION.into()), + Box::new(AssetPayment { + asset_location: Location::new(0, Here).into(), + amount: 10 + }), + call.encode(), + Weight::from_parts(100_000, 0), + Weight::from_parts(200_000, 0), + InstructionSequence::PayThroughRemoteDerivativeAccount, + Some(schedule_as.clone()), + )); + + // Check if the task's schedule_as is correct + let task = AccountTasks::::get(task_owner.clone(), task_id.clone()); + assert!(task.is_some()); + + let task = task.unwrap(); + assert!( + matches!(task.action, Action::XCMP { schedule_as: Some(ref s), .. 
} if s == &schedule_as) + ); + + // Cancel task with incorrect schedule_as + // It will throw TaskScheduleAsNotMatch error + assert_noop!( + AutomationTime::cancel_task_with_schedule_as( + RuntimeOrigin::signed(AccountId32::new(ALICE)), + task_owner, + task_id, + ), + Error::::TaskScheduleAsNotMatch + ); + + // Assert that the task is still present in accountTasks. + assert!( + matches!(task.action, Action::XCMP { schedule_as: Some(ref s), .. } if s == &schedule_as) + ); + }) +} + +// Calling cancel_task_with_schedule_as with a non-existent taskid will cause TaskDoesNotExist error. +#[test] +fn cancel_with_schedule_as_and_non_existent_taskid_will_fail() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let schedule_as = AccountId32::new(DELEGATOR_ACCOUNT); + let task_owner = AccountId32::new(PROXY_ACCOUNT); + let task_id = FIRST_TASK_ID.to_vec(); + + fund_account(&task_owner, 900_000_000, 2, Some(0)); + + // Cancel task with non-existent taskid + // It will throw TaskDoesNotExist error + assert_noop!( + AutomationTime::cancel_task_with_schedule_as( + RuntimeOrigin::signed(schedule_as), + task_owner, + task_id, + ), + Error::::TaskDoesNotExist + ); + }) +} + +// Calling cancel_task_with_schedule_as with an incorrect owner will cause TaskDoesNotExist error. +#[test] +fn cancel_with_schedule_as_and_incorrect_owner_will_fail() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let schedule_as = AccountId32::new(DELEGATOR_ACCOUNT); + let task_owner = AccountId32::new(PROXY_ACCOUNT); + let destination = Location::new(1, X1([Parachain(PARA_ID)].into())); + let task_id = FIRST_TASK_ID.to_vec(); + + fund_account(&task_owner, 900_000_000, 2, Some(0)); + + let call: ::RuntimeCall = + frame_system::Call::remark_with_event { remark: vec![0] }.into(); + + // Schedule task + assert_ok!(AutomationTime::schedule_xcmp_task( + RuntimeOrigin::signed(task_owner.clone()), + ScheduleParam::Fixed { execution_times: vec![SCHEDULED_TIME] }, + Box::new(destination.into()), + Box::new(NATIVE_LOCATION.into()), + Box::new(AssetPayment { + asset_location: Location::new(0, Here).into(), + amount: 10 + }), + call.encode(), + Weight::from_parts(100_000, 0), + Weight::from_parts(200_000, 0), + InstructionSequence::PayThroughRemoteDerivativeAccount, + Some(schedule_as.clone()), + )); + + // Check if the task's schedule_as is correct + let task = AccountTasks::::get(task_owner, task_id.clone()); + assert!(task.is_some()); + + let task = task.unwrap(); + assert!( + matches!(task.action, Action::XCMP { schedule_as: Some(ref s), .. } if s == &schedule_as) + ); + + // Cancel task with incorrect owner + // It will throw TaskDoesNotExist error + assert_noop!( + AutomationTime::cancel_task_with_schedule_as( + RuntimeOrigin::signed(schedule_as.clone()), + AccountId32::new(ALICE), + task_id, + ), + Error::::TaskDoesNotExist + ); + + // Assert that the task is still present in accountTasks. + assert!( + matches!(task.action, Action::XCMP { schedule_as: Some(ref s), .. } if s == &schedule_as) + ); + }) +} + +// When a task fails, the TaskCompleted event will still be emitted. +#[test] +fn will_emit_task_completed_event_when_task_failed() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let account_id = AccountId32::new(ALICE); + let task_id = FIRST_TASK_ID.to_vec(); + + fund_account(&account_id, 900_000_000, 2, Some(0)); + let current_funds = Balances::free_balance(account_id.clone()); + + // Because the execution of the transfer task twice requires a total amount is larger than current balance, the second task will fail. 
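+        // Worked numbers for the failure condition above (illustrative): if the
+        // funded balance were 1_000, each transfer would move 501; the first
+        // execution leaves 499, so the second transfer of 501 fails with
+        // TokenError::FundsUnavailable, yet the task still runs to completion
+        // and emits TaskCompleted, as asserted below.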
+ let call: ::RuntimeCall = + pallet_balances::Call::transfer_allow_death { + dest: AccountId32::new(BOB), + value: current_funds / 2 + 1, + } + .into(); + + // Schedule a task to be executed at SCHEDULED_TIME and SCHEDULED_TIME + frequency. + let next_execution_time = SCHEDULED_TIME + SLOT_SIZE_SECONDS; + assert_ok!(AutomationTime::schedule_dynamic_dispatch_task( + RuntimeOrigin::signed(account_id.clone()), + ScheduleParam::Fixed { + execution_times: vec![SCHEDULED_TIME, next_execution_time] + }, + Box::new(call), + )); + + // First execution + Timestamp::set_timestamp(SCHEDULED_TIME * 1_000); + LastTimeSlot::::put((LAST_BLOCK_TIME, LAST_BLOCK_TIME)); + System::reset_events(); + + AutomationTime::trigger_tasks(Weight::from_parts(900_000_000, 0)); + let my_events = events(); + + let event = my_events.into_iter().find(|e| { + matches!( + e, + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { .. }) + ) + }); + + if event.is_some() { + panic!("TaskCompleted event should not be emitted when task is not completed"); + } + + // Second execution + Timestamp::set_timestamp(next_execution_time * 1_000); + System::reset_events(); + AutomationTime::trigger_tasks(Weight::from_parts(900_000_000, 0)); + let my_events = events(); + + let mut condition: BTreeMap, Vec> = BTreeMap::new(); + condition.insert("type".as_bytes().to_vec(), "time".as_bytes().to_vec()); + condition.insert( + "timestamp".as_bytes().to_vec(), + next_execution_time.to_string().into_bytes(), + ); + + assert!(contains_events( + my_events, + vec![ + RuntimeEvent::AutomationTime(crate::Event::TaskTriggered { + who: account_id.clone(), + task_id: task_id.clone(), + condition, + }), + RuntimeEvent::AutomationTime(crate::Event::TaskExecutionFailed { + who: account_id.clone(), + task_id: task_id.clone(), + error: DispatchError::Token(FundsUnavailable), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { + who: account_id, + task_id, + }), + ] + )) + }) +} + +#[test] +fn calculate_dynamic_dispatch_action_schedule_fee_amount_works() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let num_of_execution = generate_random_num(1, 20); + let call: ::RuntimeCall = + frame_system::Call::remark_with_event { remark: vec![0] }.into(); + let action = Action::DynamicDispatch { + encoded_call: call.encode(), + }; + + let fee_amount = AutomationTime::calculate_schedule_fee_amount(&action, num_of_execution) + .expect(EXPECT_CALCULATE_SCHEDULE_FEE_AMOUNT); + + let weight = ::WeightInfo::run_dynamic_dispatch_action() + .saturating_add(call.get_dispatch_info().weight); + let expected_schedule_fee_amount = + calculate_local_action_schedule_fee(weight, num_of_execution); + + assert_eq!(fee_amount, expected_schedule_fee_amount); + }) +} + +#[test] +fn calculate_xcmp_action_schedule_fee_amount_works() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let num_of_execution = generate_random_num(1, 20); + let action = create_xcmp_action(XcmpActionParams::default()); + let fee_amount = AutomationTime::calculate_schedule_fee_amount(&action, num_of_execution) + .expect(EXPECT_CALCULATE_SCHEDULE_FEE_AMOUNT); + let expected_schedule_fee_amount = calculate_expected_xcmp_action_schedule_fee( + action.schedule_fee_location::(), + num_of_execution, + ); + assert_eq!(fee_amount, expected_schedule_fee_amount); + }) +} + +#[test] +fn calculate_xcmp_action_schedule_fee_amount_with_different_schedule_fees_works() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + get_asset_fee_per_second_config() + .into_iter() + .for_each(|fee| { + let 
num_of_execution = generate_random_num(1, 20); + let action = create_xcmp_action(XcmpActionParams { + schedule_fee: fee.asset_location.clone(), + ..XcmpActionParams::default() + }); + + let fee_amount = + AutomationTime::calculate_schedule_fee_amount(&action, num_of_execution) + .expect(EXPECT_CALCULATE_SCHEDULE_FEE_AMOUNT); + + let expected_schedule_fee_amount = calculate_expected_xcmp_action_schedule_fee( + fee.asset_location, + num_of_execution, + ); + + assert_eq!(fee_amount, expected_schedule_fee_amount); + }); + }) +} + +#[test] +fn calculate_xcmp_action_schedule_fee_amount_with_absolute_or_relative_native_schedule_fee_works() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let num_of_execution = generate_random_num(1, 20); + + let action_absolute = create_xcmp_action(XcmpActionParams { + schedule_fee: SelfLocation::get(), + ..XcmpActionParams::default() + }); + let fee_amount_abosolute = + AutomationTime::calculate_schedule_fee_amount(&action_absolute, num_of_execution) + .expect(EXPECT_CALCULATE_SCHEDULE_FEE_AMOUNT); + + let action_relative = create_xcmp_action(XcmpActionParams { + schedule_fee: Location::new(0, Here), + ..XcmpActionParams::default() + }); + let fee_amount_relative = + AutomationTime::calculate_schedule_fee_amount(&action_relative, num_of_execution) + .expect(EXPECT_CALCULATE_SCHEDULE_FEE_AMOUNT); + + let expected_schedule_fee_amount = + calculate_expected_xcmp_action_schedule_fee(Location::new(0, Here), num_of_execution); + + assert_eq!(fee_amount_abosolute, fee_amount_relative); + assert_eq!(fee_amount_abosolute, expected_schedule_fee_amount); + }) +} + +#[test] +fn calculate_xcmp_action_schedule_fee_amount_with_different_destination_returns_same_result() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let num_of_execution = generate_random_num(1, 20); + + let action = create_xcmp_action(XcmpActionParams { + destination: Location::new(1, X1([Parachain(PARA_ID)].into())), + ..XcmpActionParams::default() + }); + let fee_amount = AutomationTime::calculate_schedule_fee_amount(&action, num_of_execution) + .expect(EXPECT_CALCULATE_SCHEDULE_FEE_AMOUNT); + + let action_diffrent_dest = create_xcmp_action(XcmpActionParams { + destination: Location::new(1, X1([Parachain(3000)].into())), + ..XcmpActionParams::default() + }); + let fee_amount_diffrent_dest = + AutomationTime::calculate_schedule_fee_amount(&action_diffrent_dest, num_of_execution) + .expect(EXPECT_CALCULATE_SCHEDULE_FEE_AMOUNT); + + assert_eq!(fee_amount, fee_amount_diffrent_dest); + }) +} + +#[test] +fn calculate_xcmp_action_schedule_fee_amount_with_different_execution_fee_returns_same_result() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let num_of_execution = generate_random_num(1, 20); + + let action = create_xcmp_action(XcmpActionParams { + execution_fee: *Box::new(AssetPayment { + asset_location: get_moonbase_asset_location().into(), + amount: 100, + }), + ..XcmpActionParams::default() + }); + let fee_amount = AutomationTime::calculate_schedule_fee_amount(&action, num_of_execution) + .expect(EXPECT_CALCULATE_SCHEDULE_FEE_AMOUNT); + + let action_diffrent_execution_fee = create_xcmp_action(XcmpActionParams { + execution_fee: *Box::new(AssetPayment { + asset_location: Location::new(1, X1([Parachain(3000)].into())).into(), + amount: 300, + }), + ..XcmpActionParams::default() + }); + let fee_amount_diffrent_execution_fee = AutomationTime::calculate_schedule_fee_amount( + &action_diffrent_execution_fee, + num_of_execution, + ) + .expect(EXPECT_CALCULATE_SCHEDULE_FEE_AMOUNT); + + 
assert_eq!(fee_amount, fee_amount_diffrent_execution_fee); + }) +} + +#[test] +fn calculate_xcmp_action_schedule_fee_amount_with_different_instruction_sequence_returns_same_result( +) { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let num_of_execution = generate_random_num(1, 20); + + let action = create_xcmp_action(XcmpActionParams { + instruction_sequence: InstructionSequence::PayThroughSovereignAccount, + ..XcmpActionParams::default() + }); + let fee_amount = AutomationTime::calculate_schedule_fee_amount(&action, num_of_execution) + .expect(EXPECT_CALCULATE_SCHEDULE_FEE_AMOUNT); + + let action_diff_instruction_sequence = create_xcmp_action(XcmpActionParams { + instruction_sequence: InstructionSequence::PayThroughRemoteDerivativeAccount, + ..XcmpActionParams::default() + }); + let fee_amount_diff_instruction_sequence = AutomationTime::calculate_schedule_fee_amount( + &action_diff_instruction_sequence, + num_of_execution, + ) + .expect(EXPECT_CALCULATE_SCHEDULE_FEE_AMOUNT); + + assert_eq!(fee_amount, fee_amount_diff_instruction_sequence); + }) +} + +#[test] +fn calculate_xcmp_action_schedule_fee_amount_with_different_schedule_as_returns_same_result() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let num_of_execution = generate_random_num(1, 20); + + let action = create_xcmp_action(XcmpActionParams { + schedule_as: Some(ALICE.into()), + ..XcmpActionParams::default() + }); + let fee_amount = AutomationTime::calculate_schedule_fee_amount(&action, num_of_execution) + .expect(EXPECT_CALCULATE_SCHEDULE_FEE_AMOUNT); + + let action_different_schedule_as = create_xcmp_action(XcmpActionParams { + schedule_as: Some(BOB.into()), + ..XcmpActionParams::default() + }); + let fee_amount_different_schedule_as = AutomationTime::calculate_schedule_fee_amount( + &action_different_schedule_as, + num_of_execution, + ) + .expect(EXPECT_CALCULATE_SCHEDULE_FEE_AMOUNT); + + assert_eq!(fee_amount, fee_amount_different_schedule_as); + }) +} + +#[test] +fn calculate_xcmp_action_schedule_fee_amount_with_unknown_schedule_fees_fails() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let num_of_execution = generate_random_num(1, 20); + let unknown_schedule_fee = Location { + parents: 1, + interior: Parachain(4000).into(), + }; + let action = create_xcmp_action(XcmpActionParams { + schedule_fee: unknown_schedule_fee, + ..XcmpActionParams::default() + }); + assert_noop!( + AutomationTime::calculate_schedule_fee_amount(&action, num_of_execution), + sp_runtime::DispatchError::Other("CouldNotDetermineFeePerSecond"), + ); + }) +} + +#[test] +fn schedule_xcmp_works() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let destination = Location::new(1, X1([Parachain(PARA_ID)].into())); + let alice = AccountId32::new(ALICE); + let call: Vec = vec![2, 4, 5]; + // Funds including XCM fees + get_xcmp_funds(alice.clone()); + + assert_ok!(AutomationTime::schedule_xcmp_task( + RuntimeOrigin::signed(alice), + ScheduleParam::Fixed { + execution_times: vec![SCHEDULED_TIME] + }, + Box::new(destination.into()), + Box::new(NATIVE_LOCATION.into()), + Box::new(AssetPayment { + asset_location: Location::new(0, Here).into(), + amount: 10 + }), + call, + Weight::from_parts(100_000, 0), + Weight::from_parts(200_000, 0), + InstructionSequence::PayThroughSovereignAccount, + None, + )); + }) +} + +#[test] +fn schedule_xcmp_task_and_check_encoded_call_success() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let alice = AccountId32::new(ALICE); + // Funds including XCM fees + get_xcmp_funds(alice.clone()); + + let 
origin = RuntimeOrigin::signed(alice); + let schedule = ScheduleParam::Fixed { execution_times: vec![SCHEDULED_TIME] }; + let destination: Box = Box::new(Location::new(1, X1([Parachain(PARA_ID)].into())).into()); + let schedule_fee: Box = Box::new(NATIVE_LOCATION.into()); + let execution_fee = Box::new(AssetPayment { + asset_location: Location::new(0, Here).into(), + amount: 10, + }); + let remote_encoded_call = vec![2, 4, 5]; + let encoded_call_weight = Weight::from_parts(100_000, 0); + let overall_weight = Weight::from_parts(200_000, 0); + let instruction_sequence = InstructionSequence::PayThroughSovereignAccount; + let schedule_as = None; + + // Call the schedule_xcmp_task function + assert_ok!(AutomationTime::schedule_xcmp_task( + origin, + schedule.clone(), + destination.clone(), + schedule_fee.clone(), + execution_fee.clone(), + remote_encoded_call.clone(), + encoded_call_weight, + overall_weight, + instruction_sequence, + schedule_as.clone(), + )); + + // Calculate the expected encoded call + let expected_encoded_call = Into::::into(crate::Call::schedule_xcmp_task { + schedule, + destination, + schedule_fee, + execution_fee, + encoded_call: remote_encoded_call, + encoded_call_weight, + overall_weight, + instruction_sequence, + schedule_as, + }).encode(); + + // Find the TaskScheduled event in the event list and verify if the encoded_call within it is correct. + events() + .into_iter() + .find(|e| matches!(e, RuntimeEvent::AutomationTime(crate::Event::TaskScheduled { encoded_call, .. }) if encoded_call.as_ref() == Some(&expected_encoded_call))) + .expect("TaskScheduled event should emit with correct encoded_call."); + }) +} + +#[test] +fn schedule_xcmp_works_with_multi_currency() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let destination = Location::new(1, X1([Parachain(PARA_ID)].into())); + + let alice = AccountId32::new(ALICE); + let call: Vec = vec![2, 4, 5]; + // Funds including XCM fees + get_multi_xcmp_funds(alice.clone()); + + assert_ok!(AutomationTime::schedule_xcmp_task( + RuntimeOrigin::signed(alice), + ScheduleParam::Fixed { + execution_times: vec![SCHEDULED_TIME] + }, + Box::new(destination.clone().into()), + Box::new(NATIVE_LOCATION.into()), + Box::new(AssetPayment { + asset_location: destination.into(), + amount: 10 + }), + call, + Weight::from_parts(100_000, 0), + Weight::from_parts(200_000, 0), + InstructionSequence::PayThroughSovereignAccount, + None, + )); + }) +} + +#[test] +fn schedule_xcmp_works_with_unsupported_currency_will_fail() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let destination = Location::new(1, X1([Parachain(PARA_ID)].into())); + let alice = AccountId32::new(ALICE); + let call: Vec = vec![2, 4, 5]; + // Funds including XCM fees + get_multi_xcmp_funds(alice.clone()); + + assert_noop!( + AutomationTime::schedule_xcmp_task( + RuntimeOrigin::signed(alice), + ScheduleParam::Fixed { + execution_times: vec![SCHEDULED_TIME] + }, + Box::new(destination.into()), + Box::new(NATIVE_LOCATION.into()), + Box::new(AssetPayment { + asset_location: Location::new(1, X1([Parachain(3000)].into())).into(), + amount: 10 + }), + call, + Weight::from_parts(100_000, 0), + Weight::from_parts(200_000, 0), + InstructionSequence::PayThroughSovereignAccount, + None, + ), + Error::::UnsupportedFeePayment, + ); + }) +} + +#[test] +fn schedule_xcmp_with_schedule_as_works() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let destination = Location::new(1, X1([Parachain(PARA_ID)].into( + + ))); + let delegator_account = 
AccountId32::new(DELEGATOR_ACCOUNT); + let proxy_account = AccountId32::new(PROXY_ACCOUNT); + let call: Vec = vec![2, 4, 5]; + + // Funds including XCM fees + get_xcmp_funds(proxy_account.clone()); + + assert_ok!(AutomationTime::schedule_xcmp_task( + RuntimeOrigin::signed(proxy_account.clone()), + ScheduleParam::Fixed { execution_times: vec![SCHEDULED_TIME] }, + Box::new(destination.clone().into()), + Box::new(Location::default().into()), + Box::new(AssetPayment { asset_location: destination.into(), amount: 10 }), + call, + Weight::from_parts(100_000, 0), + Weight::from_parts(200_000, 0), + InstructionSequence::PayThroughRemoteDerivativeAccount, + Some(delegator_account.clone()), + )); + + let tasks = AutomationTime::get_scheduled_tasks(SCHEDULED_TIME); + assert!(tasks.is_some()); + + let tasks = tasks.unwrap(); + assert_eq!(tasks.tasks[0].0, proxy_account); + + // Find the TaskScheduled event in the event list and verify if the who within it is correct. + events() + .into_iter() + .find(|e| matches!(e, RuntimeEvent::AutomationTime(crate::Event::TaskScheduled { + who, + schedule_as, + .. + }) if *who == proxy_account && *schedule_as == Some(delegator_account.clone()))) + .expect("TaskScheduled event should emit with 'who' being proxy_account, and 'schedule_as' being delegator_account."); + }) +} + +#[test] +fn schedule_xcmp_with_schedule_as_same_as_delegator_account() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let delegator_account = AccountId32::new(ALICE); + let call: Vec = vec![2, 4, 5]; + let destination = Location::new(1, X1([Parachain(PARA_ID)].into())); + + // Funds including XCM fees + get_xcmp_funds(delegator_account.clone()); + + assert_noop!( + AutomationTime::schedule_xcmp_task( + RuntimeOrigin::signed(delegator_account.clone()), + ScheduleParam::Fixed { + execution_times: vec![SCHEDULED_TIME] + }, + Box::new(destination.clone().into()), + Box::new(Location::default().into()), + Box::new(AssetPayment { + asset_location: destination.into(), + amount: 10 + }), + call, + Weight::from_parts(100_000, 0), + Weight::from_parts(200_000, 0), + InstructionSequence::PayThroughRemoteDerivativeAccount, + Some(delegator_account), + ), + sp_runtime::DispatchError::Other("proxy error: expected `ProxyType::Any`"), + ); + }) +} + +#[test] +fn schedule_xcmp_fails_if_not_enough_funds() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let para_id: u32 = 1000; + let alice = AccountId32::new(ALICE); + let call: Vec = vec![2, 4, 5]; + let destination = Location::new(1, X1([Parachain(para_id)].into())); + // Funds not including XCM fees + get_minimum_funds(alice.clone(), 1); + + assert_noop!( + AutomationTime::schedule_xcmp_task( + RuntimeOrigin::signed(alice), + ScheduleParam::Fixed { + execution_times: vec![SCHEDULED_TIME] + }, + Box::new(destination.into()), + Box::new(NATIVE_LOCATION.into()), + Box::new(AssetPayment { + asset_location: Location::new(0, Here).into(), + amount: 10000000000000 + }), + call, + Weight::from_parts(100_000, 0), + Weight::from_parts(200_000, 0), + InstructionSequence::PayThroughSovereignAccount, + None, + ), + Error::::InsufficientBalance, + ); + }) +} + +// test that we cannot schedule another task with the same id +// Because the ID is auto-generated now so to test this scenerio, we use the +// normal schedule call to schedule a task and call into a low level API +// to schedule a new task with that same last generated task id to observe +// the error +#[test] +fn schedule_duplicates_errors() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let 
owner = AccountId32::new(ALICE); + get_funds(owner.clone()); + let task_id = schedule_task(ALICE, vec![SCHEDULED_TIME], vec![2, 4]); + + let task2 = TaskOf::::create_event_task::( + owner, + task_id, + vec![SCHEDULED_TIME], + vec![10, 12], + vec![], + ) + .unwrap(); + + assert_noop!( + AutomationTime::schedule_task(&task2), + Error::::DuplicateTask, + ); + }) +} + +// there is an upper limit of how many time slot in Fixed Scheduled, when +// passing a large enough array we return TooManyExecutionsTimes error +#[test] +fn schedule_max_execution_times_errors() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let call: RuntimeCall = frame_system::Call::remark { + remark: vec![2, 4, 5], + } + .into(); + assert_ok!(fund_account_dynamic_dispatch( + &AccountId32::new(ALICE), + // fake schedule 4 times in the schedule param below + 4, + call.encode() + )); + assert_noop!( + AutomationTime::schedule_dynamic_dispatch_task( + RuntimeOrigin::signed(AccountId32::new(ALICE)), + ScheduleParam::Fixed { + execution_times: vec![ + SCHEDULED_TIME, + SCHEDULED_TIME + SLOT_SIZE_SECONDS, + SCHEDULED_TIME + SLOT_SIZE_SECONDS * 2, + SCHEDULED_TIME + SLOT_SIZE_SECONDS * 3 + ] + }, + Box::new( + frame_system::Call::remark { + remark: vec![2, 4, 5] + } + .into() + ) + ), + Error::::TooManyExecutionsTimes, + ); + }) +} + +// when user made mistake and pass duplicate time slot on Fixed Schedule, we +// attempt to correct it and store the corrected schedule on-chain +// Verified that the stored schedule is corrected without any duplication +#[test] +fn schedule_execution_times_removes_dupes() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let owner = AccountId32::new(ALICE); + + get_funds(owner.clone()); + let task_id1 = schedule_task( + ALICE, + vec![ + SCHEDULED_TIME, + SCHEDULED_TIME, + SCHEDULED_TIME, + SCHEDULED_TIME, + SCHEDULED_TIME + SLOT_SIZE_SECONDS * 3, + ], + vec![2, 4], + ); + + match AutomationTime::get_account_task(owner, task_id1) { + None => { + panic!("A task should exist if it was scheduled") + } + Some(task) => { + let expected_task = TaskOf::::create_event_task::( + AccountId32::new(ALICE), + vec![49, 45, 48, 45, 52], + vec![SCHEDULED_TIME, SCHEDULED_TIME + SLOT_SIZE_SECONDS * 3], + vec![2, 4], + vec![], + ) + .unwrap(); + + assert_eq!(task, expected_task); + } + } + }) +} + +// For a given tasks slot, we don't want to have too many small, light weight +// tasks or have just a handful tasks but the total weight is over the limit +// We guard with a max tasks per slot and max weight per slot. +// +// Verify that when the slot has enough tasks, new task cannot be scheduled, and +// an error TimeSlotFull is returned. 
+// +// we mock the MaxTasksPerSlot=2 in mocks.rs +#[test] +fn schedule_time_slot_full() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let call1: RuntimeCall = frame_system::Call::remark { remark: vec![2, 4] }.into(); + assert_ok!(fund_account_dynamic_dispatch( + &AccountId32::new(ALICE), + 1, + call1.encode() + )); + + assert_ok!(AutomationTime::schedule_dynamic_dispatch_task( + RuntimeOrigin::signed(AccountId32::new(ALICE)), + ScheduleParam::Fixed { + execution_times: vec![SCHEDULED_TIME] + }, + Box::new(call1) + )); + + let call2: RuntimeCall = frame_system::Call::remark { + remark: vec![2, 4, 5], + } + .into(); + assert_ok!(fund_account_dynamic_dispatch( + &AccountId32::new(ALICE), + 1, + call2.encode() + )); + assert_ok!(AutomationTime::schedule_dynamic_dispatch_task( + RuntimeOrigin::signed(AccountId32::new(ALICE)), + ScheduleParam::Fixed { + execution_times: vec![SCHEDULED_TIME] + }, + Box::new(call2) + )); + + let call3: RuntimeCall = frame_system::Call::remark { remark: vec![2] }.into(); + assert_ok!(fund_account_dynamic_dispatch( + &AccountId32::new(ALICE), + 1, + call3.encode() + )); + assert_noop!( + AutomationTime::schedule_dynamic_dispatch_task( + RuntimeOrigin::signed(AccountId32::new(ALICE)), + ScheduleParam::Fixed { + execution_times: vec![SCHEDULED_TIME] + }, + Box::new(call3) + ), + Error::::TimeSlotFull, + ); + }) +} + +// test case when a slot in full, we will roll back its state atomically +// and won't leave the task queue in a partial state. +// +// It's similar to above test. However, we test a task that has scheduled +// with many execution_times where as only a few execution_time slots are full +// while the rest of execution_time slots aren't full. +// +// even though other time slots aren't full, we still reject as a whole, return +// TimeSlotFull error and verify that none of the tasks has been scheduled into any +// time slot, even the one that isn't full. 
+// +// in other word, task scheduled is atomic, all task executions need to be able +// to put into the schedule task slots, otherwise none of data should be stored +// partially +#[test] +fn schedule_time_slot_full_rolls_back() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let call1: RuntimeCall = frame_system::Call::remark { + remark: vec![2, 4, 5], + } + .into(); + assert_ok!(fund_account_dynamic_dispatch( + &AccountId32::new(ALICE), + 1, + call1.encode() + )); + + let task_id1 = schedule_task( + ALICE, + vec![SCHEDULED_TIME + SLOT_SIZE_SECONDS * 2], + vec![2, 4, 5], + ); + + let _call2: RuntimeCall = frame_system::Call::remark { remark: vec![2, 4] }.into(); + assert_ok!(fund_account_dynamic_dispatch( + &AccountId32::new(ALICE), + 1, + call1.encode() + )); + let task_id2 = schedule_task( + ALICE, + vec![SCHEDULED_TIME + SLOT_SIZE_SECONDS * 2], + vec![2, 4], + ); + + let call: RuntimeCall = frame_system::Call::remark { remark: vec![2] }.into(); + assert_ok!(fund_account_dynamic_dispatch( + &AccountId32::new(ALICE), + 1, + call.encode() + )); + assert_noop!( + AutomationTime::schedule_dynamic_dispatch_task( + RuntimeOrigin::signed(AccountId32::new(ALICE)), + ScheduleParam::Fixed { + execution_times: vec![ + SCHEDULED_TIME, + SCHEDULED_TIME + SLOT_SIZE_SECONDS, + SCHEDULED_TIME + SLOT_SIZE_SECONDS * 2 + ] + }, + Box::new(call) + ), + Error::::TimeSlotFull, + ); + + if AutomationTime::get_scheduled_tasks(SCHEDULED_TIME).is_some() { + panic!("Tasks scheduled for the time it should have been rolled back") + } + if AutomationTime::get_scheduled_tasks(SCHEDULED_TIME + SLOT_SIZE_SECONDS).is_some() { + panic!("Tasks scheduled for the time it should have been rolled back") + } + match AutomationTime::get_scheduled_tasks(SCHEDULED_TIME + SLOT_SIZE_SECONDS * 2) { + None => { + panic!("A task should be scheduled") + } + Some(ScheduledTasksOf:: { + tasks: account_task_ids, + .. 
+ }) => { + assert_eq!(account_task_ids.len(), 2); + assert_eq!(account_task_ids[0].1, task_id1); + assert_eq!(account_task_ids[1].1, task_id2); + } + } + }) +} + +// verify that task scheduled in different block has the right id +#[test] +fn taskid_changed_per_block() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let task_id1 = schedule_task( + ALICE, + vec![ + SCHEDULED_TIME, + SCHEDULED_TIME + SLOT_SIZE_SECONDS, + SCHEDULED_TIME + SLOT_SIZE_SECONDS * 2, + ], + vec![2, 4, 5], + ); + + System::set_block_number(20); + let task_id2 = schedule_task( + ALICE, + vec![ + SCHEDULED_TIME, + SCHEDULED_TIME + SLOT_SIZE_SECONDS, + SCHEDULED_TIME + SLOT_SIZE_SECONDS * 2, + ], + vec![2, 4, 5], + ); + LastTimeSlot::::put(( + SCHEDULED_TIME - SLOT_SIZE_SECONDS * 4, + SCHEDULED_TIME - SLOT_SIZE_SECONDS * 4, + )); + + assert_eq!(task_id1, FIRST_TASK_ID.to_vec()); + assert_eq!(task_id2, vec![50, 48, 45, 48, 45, 54]); + }) +} + +// verify that task scheduled in same block with different extrinsic index has different tx id +#[test] +fn taskid_adjusted_on_extrinsicid_on_same_block() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let first_caller = AccountId32::new(ALICE); + let message: Vec = vec![2, 4, 5]; + + let task_id1 = schedule_task( + ALICE, + vec![ + SCHEDULED_TIME, + SCHEDULED_TIME + SLOT_SIZE_SECONDS, + SCHEDULED_TIME + SLOT_SIZE_SECONDS * 2, + ], + message.clone(), + ); + + // Set to a high and more than one digit extrinsic index to test task_id also match + System::set_extrinsic_index(234); + + let second_caller = AccountId32::new(BOB); + let task_id2 = schedule_task( + BOB, + vec![ + SCHEDULED_TIME, + SCHEDULED_TIME + SLOT_SIZE_SECONDS, + SCHEDULED_TIME + SLOT_SIZE_SECONDS * 2, + ], + message.clone(), + ); + LastTimeSlot::::put(( + SCHEDULED_TIME - SLOT_SIZE_SECONDS * 4, + SCHEDULED_TIME - SLOT_SIZE_SECONDS * 4, + )); + + assert_eq!(task_id1, FIRST_TASK_ID.to_vec()); + assert_eq!(task_id2, vec![49, 45, 50, 51, 52, 45, 56]); + + // Calculate the expected encoded call + let expected_encoded_call = + Into::::into(frame_system::Call::remark_with_event { remark: message }) + .encode(); + + // Find the TaskScheduled event in the event list and verify if the encoded_call within it is correct. 
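+        // Decoding the expected ids (illustrative): task ids are ASCII strings of
+        // the form "{block}-{extrinsic index}-{event index}", so FIRST_TASK_ID
+        // ([49, 45, 48, 45, 51]) reads "1-0-3", and [49, 45, 50, 51, 52, 45, 56]
+        // reads "1-234-8", matching the extrinsic index 234 set above.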
+ assert_has_event(RuntimeEvent::AutomationTime(crate::Event::TaskScheduled { + who: first_caller, + task_id: FIRST_TASK_ID.to_vec(), + schedule_as: None, + encoded_call: Some(expected_encoded_call.clone()), + })); + + assert_has_event(RuntimeEvent::AutomationTime(crate::Event::TaskScheduled { + who: second_caller, + task_id: vec![49, 45, 50, 51, 52, 45, 56], + schedule_as: None, + encoded_call: Some(expected_encoded_call), + })); + }) +} + +// verify that task scheduled in same block with different extrinsic index has different tx id +#[test] +fn taskid_adjusted_on_eventindex_on_same_block_from_same_caller() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let caller = AccountId32::new(ALICE); + let message: Vec = vec![2, 4, 5]; + + let task_id1 = schedule_task( + ALICE, + vec![ + SCHEDULED_TIME, + SCHEDULED_TIME + SLOT_SIZE_SECONDS, + SCHEDULED_TIME + SLOT_SIZE_SECONDS * 2, + ], + message.clone(), + ); + + // Set to a high and more than one digit extrinsic index to test task_id also match + System::set_extrinsic_index(234); + + let task_id2 = schedule_task( + ALICE, + vec![ + SCHEDULED_TIME, + SCHEDULED_TIME + SLOT_SIZE_SECONDS, + SCHEDULED_TIME + SLOT_SIZE_SECONDS * 2, + ], + message.clone(), + ); + + // 1-0-3 + assert_eq!(task_id1, "1-0-3".as_bytes().to_vec()); + // 1-234-6 + assert_eq!(task_id2, "1-234-6".as_bytes().to_vec()); + + // Calculate the expected encoded call + let expected_encoded_call = + Into::::into(frame_system::Call::remark_with_event { remark: message }) + .encode(); + + // Find the TaskScheduled event in the event list and verify if the encoded_call within it is correct. + assert_has_event(RuntimeEvent::AutomationTime(crate::Event::TaskScheduled { + who: caller.clone(), + task_id: "1-0-3".as_bytes().to_vec(), + schedule_as: None, + encoded_call: Some(expected_encoded_call.clone()), + })); + + assert_has_event(RuntimeEvent::AutomationTime(crate::Event::TaskScheduled { + who: caller, + task_id: "1-234-6".as_bytes().to_vec(), + schedule_as: None, + encoded_call: Some(expected_encoded_call), + })); + }) +} + +// verify that task scheduled in same block with same extrinsic index has different event id +#[test] +fn taskid_on_same_extrinsid_have_unique_event_index() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let owner = AccountId32::new(ALICE); + let message: Vec = vec![2, 4, 5]; + + let task_id1 = schedule_task( + ALICE, + vec![ + SCHEDULED_TIME, + SCHEDULED_TIME + SLOT_SIZE_SECONDS, + SCHEDULED_TIME + SLOT_SIZE_SECONDS * 2, + ], + message.clone(), + ); + + let task_id2 = schedule_task( + ALICE, + vec![ + SCHEDULED_TIME, + SCHEDULED_TIME + SLOT_SIZE_SECONDS, + SCHEDULED_TIME + SLOT_SIZE_SECONDS * 2, + ], + message.clone(), + ); + LastTimeSlot::::put(( + SCHEDULED_TIME - SLOT_SIZE_SECONDS * 4, + SCHEDULED_TIME - SLOT_SIZE_SECONDS * 4, + )); + + assert_eq!(task_id1, FIRST_TASK_ID.to_vec()); + assert_eq!(task_id2, SECOND_TASK_ID.to_vec()); + + // Calculate the expected encoded call + let expected_encoded_call = + Into::::into(frame_system::Call::remark_with_event { remark: message }) + .encode(); + + // Find the TaskScheduled event in the event list and verify if the encoded_call within it is correct. + assert_has_event(RuntimeEvent::AutomationTime(crate::Event::TaskScheduled { + who: owner, + task_id: FIRST_TASK_ID.to_vec(), + schedule_as: None, + encoded_call: Some(expected_encoded_call), + })); + }) +} + +// verify that the owner of a task can cancel a Fixed schedule task by its id. 
+// In this test we focus on confirmation of canceling the task that has a single +// execution times +#[test] +fn cancel_works_for_fixed_scheduled() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let task_id1 = schedule_task(ALICE, vec![SCHEDULED_TIME], vec![2, 4, 5]); + let task_id2 = schedule_task(ALICE, vec![SCHEDULED_TIME], vec![2, 4]); + LastTimeSlot::::put(( + SCHEDULED_TIME - SLOT_SIZE_SECONDS * 4, + SCHEDULED_TIME - SLOT_SIZE_SECONDS * 4, + )); + System::reset_events(); + + assert_ok!(AutomationTime::cancel_task( + RuntimeOrigin::signed(AccountId32::new(ALICE)), + task_id1.clone(), + )); + assert_ok!(AutomationTime::cancel_task( + RuntimeOrigin::signed(AccountId32::new(ALICE)), + task_id2.clone(), + )); + + if AutomationTime::get_scheduled_tasks(SCHEDULED_TIME).is_some() { + panic!("Since there were only two tasks scheduled for the time it should have been deleted") + } + assert_eq!( + events(), + [ + RuntimeEvent::AutomationTime(crate::Event::TaskCancelled { + who: AccountId32::new(ALICE), + task_id: task_id1 + }), + RuntimeEvent::AutomationTime(crate::Event::TaskCancelled { + who: AccountId32::new(ALICE), + task_id: task_id2, + }), + ] + ); + }) +} + +// verify that the owner of a task can cancel a Fixed schedule task by its id. +// In this test we focus on confirmation of canceling the task that has many +// execution times +#[test] +fn cancel_works_for_multiple_executions_scheduled() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let owner = AccountId32::new(ALICE); + let task_id1 = schedule_task( + ALICE, + vec![ + SCHEDULED_TIME, + SCHEDULED_TIME + SLOT_SIZE_SECONDS, + SCHEDULED_TIME + SLOT_SIZE_SECONDS * 2, + ], + vec![2, 4, 5], + ); + LastTimeSlot::::put(( + SCHEDULED_TIME - SLOT_SIZE_SECONDS * 4, + SCHEDULED_TIME - SLOT_SIZE_SECONDS * 4, + )); + System::reset_events(); + + assert_ok!(AutomationTime::cancel_task( + RuntimeOrigin::signed(owner.clone()), + task_id1.clone(), + )); + + assert_eq!( + AutomationTime::get_account_task(owner.clone(), task_id1.clone()), + None + ); + if AutomationTime::get_scheduled_tasks(SCHEDULED_TIME).is_some() { + panic!("Tasks scheduled for the time it should have been deleted") + } + if AutomationTime::get_scheduled_tasks(SCHEDULED_TIME + SLOT_SIZE_SECONDS).is_some() { + panic!("Tasks scheduled for the time it should have been deleted") + } + if AutomationTime::get_scheduled_tasks(SCHEDULED_TIME + SLOT_SIZE_SECONDS * 2).is_some() { + panic!("Tasks scheduled for the time it should have been deleted") + } + assert_eq!( + events(), + [RuntimeEvent::AutomationTime(crate::Event::TaskCancelled { + who: owner, + task_id: task_id1, + })] + ); + }) +} + +// verify that the owner of a task can cancel a Recurring schedule task by its id +#[test] +fn cancel_works_for_recurring_scheduled() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let task_id1 = + schedule_recurring_task(ALICE, SCHEDULED_TIME, SLOT_SIZE_SECONDS, vec![2, 4, 5]); + let task_id2 = + schedule_recurring_task(ALICE, SCHEDULED_TIME, SLOT_SIZE_SECONDS, vec![2, 4]); + + LastTimeSlot::::put(( + SCHEDULED_TIME - SLOT_SIZE_SECONDS * 4, + SCHEDULED_TIME - SLOT_SIZE_SECONDS * 4, + )); + System::reset_events(); + + assert_ok!(AutomationTime::cancel_task( + RuntimeOrigin::signed(AccountId32::new(ALICE)), + task_id1.clone(), + )); + assert_ok!(AutomationTime::cancel_task( + RuntimeOrigin::signed(AccountId32::new(ALICE)), + task_id2.clone(), + )); + + if AutomationTime::get_scheduled_tasks(SCHEDULED_TIME).is_some() { + panic!("Since there were only two tasks scheduled for the 
time it should have been deleted")
+        }
+        assert_eq!(
+            events(),
+            [
+                RuntimeEvent::AutomationTime(crate::Event::TaskCancelled {
+                    who: AccountId32::new(ALICE),
+                    task_id: task_id1
+                }),
+                RuntimeEvent::AutomationTime(crate::Event::TaskCancelled {
+                    who: AccountId32::new(ALICE),
+                    task_id: task_id2
+                }),
+            ]
+        );
+    })
+}
+
+// given a Fixed schedule task that has multiple execution times and has already
+// run at least once, we can still cancel it to prevent the remaining executions
+// in subsequent task triggering.
+#[test]
+fn cancel_works_for_an_executed_task() {
+    new_test_ext(START_BLOCK_TIME).execute_with(|| {
+        let owner = AccountId32::new(ALICE);
+        let call: RuntimeCall = frame_system::Call::remark_with_event { remark: vec![50] }.into();
+        let task_id1 = schedule_dynamic_dispatch_task(
+            ALICE,
+            vec![SCHEDULED_TIME, SCHEDULED_TIME + SLOT_SIZE_SECONDS],
+            call,
+        );
+        Timestamp::set_timestamp(SCHEDULED_TIME * 1_000);
+        LastTimeSlot::<Test>::put((
+            SCHEDULED_TIME - SLOT_SIZE_SECONDS,
+            SCHEDULED_TIME - SLOT_SIZE_SECONDS,
+        ));
+        System::reset_events();
+
+        match AutomationTime::get_account_task(owner.clone(), task_id1.clone()) {
+            None => {
+                panic!("A task should exist if it was scheduled")
+            }
+            Some(task) => {
+                assert_eq!(task.schedule.known_executions_left(), 2);
+            }
+        }
+
+        match AutomationTime::get_scheduled_tasks(SCHEDULED_TIME) {
+            None => {
+                panic!("A task should be scheduled")
+            }
+            Some(ScheduledTasksOf::<Test> {
+                tasks: task_ids, ..
+            }) => {
+                assert_eq!(task_ids.len(), 1);
+                assert_eq!(task_ids[0].1, task_id1);
+            }
+        }
+        match AutomationTime::get_scheduled_tasks(SCHEDULED_TIME + SLOT_SIZE_SECONDS) {
+            None => {
+                panic!("A task should be scheduled")
+            }
+            Some(ScheduledTasksOf::<Test> {
+                tasks: task_ids, ..
+            }) => {
+                assert_eq!(task_ids.len(), 1);
+                assert_eq!(task_ids[0].1, task_id1);
+            }
+        }
+
+        AutomationTime::trigger_tasks(Weight::from_parts(200_000, 0));
+        let my_events = events();
+
+        let mut condition: BTreeMap<Vec<u8>, Vec<u8>> = BTreeMap::new();
+        condition.insert("type".as_bytes().to_vec(), "time".as_bytes().to_vec());
+        condition.insert(
+            "timestamp".as_bytes().to_vec(),
+            SCHEDULED_TIME.to_string().into_bytes(),
+        );
+        assert_eq!(
+            my_events,
+            [
+                RuntimeEvent::AutomationTime(crate::Event::TaskTriggered {
+                    who: owner.clone(),
+                    task_id: task_id1.clone(),
+                    condition,
+                }),
+                RuntimeEvent::System(frame_system::pallet::Event::Remarked {
+                    sender: owner.clone(),
+                    hash: BlakeTwo256::hash(&[50]),
+                }),
+                RuntimeEvent::AutomationTime(crate::Event::TaskExecuted {
+                    who: owner.clone(),
+                    task_id: task_id1.clone(),
+                }),
+            ]
+        );
+        match AutomationTime::get_account_task(owner.clone(), task_id1.clone()) {
+            None => {
+                panic!("A task should exist if it was scheduled")
+            }
+            Some(task) => {
+                assert_eq!(task.schedule.known_executions_left(), 1);
+            }
+        }
+
+        assert_eq!(AutomationTime::get_scheduled_tasks(SCHEDULED_TIME), None);
+        match AutomationTime::get_scheduled_tasks(SCHEDULED_TIME + SLOT_SIZE_SECONDS) {
+            None => {
+                panic!("A task should be scheduled")
+            }
+            Some(ScheduledTasksOf::<Test> {
+                tasks: task_ids, ..
+            }) => {
+                assert_eq!(task_ids.len(), 1);
+                assert_eq!(task_ids[0].1, task_id1);
+            }
+        }
+
+        assert_ok!(AutomationTime::cancel_task(
+            RuntimeOrigin::signed(AccountId32::new(ALICE)),
+            task_id1.clone()
+        ));
+
+        assert_eq!(AutomationTime::get_scheduled_tasks(SCHEDULED_TIME), None);
+        assert_eq!(
+            AutomationTime::get_scheduled_tasks(SCHEDULED_TIME + SLOT_SIZE_SECONDS),
+            None
+        );
+
+        assert_eq!(
+            AutomationTime::get_account_task(owner.clone(), task_id1.clone()),
+            None
+        );
+        assert_eq!(
+            events(),
+            [RuntimeEvent::AutomationTime(crate::Event::TaskCancelled {
+                who: owner,
+                task_id: task_id1,
+            })]
+        );
+    })
+}
+
+// verify that if a task has already been moved from its scheduled slot into the
+// task queue, it can still be cancelled by its id.
+#[test]
+fn cancel_works_for_tasks_in_queue() {
+    new_test_ext(START_BLOCK_TIME).execute_with(|| {
+        let task_id = add_task_to_task_queue(
+            ALICE,
+            vec![40],
+            vec![SCHEDULED_TIME],
+            create_dynamic_dispatch_remark_action(vec![2, 4, 5]),
+            vec![],
+        );
+        LastTimeSlot::<Test>::put((SCHEDULED_TIME, SCHEDULED_TIME));
+
+        assert_eq!(task_id, AutomationTime::get_task_queue()[0].1);
+        assert_eq!(1, AutomationTime::get_task_queue().len());
+
+        assert_ok!(AutomationTime::cancel_task(
+            RuntimeOrigin::signed(AccountId32::new(ALICE)),
+            task_id.clone(),
+        ));
+
+        assert_eq!(
+            events(),
+            [RuntimeEvent::AutomationTime(crate::Event::TaskCancelled {
+                who: AccountId32::new(ALICE),
+                task_id
+            }),]
+        );
+        assert_eq!(0, AutomationTime::get_task_queue().len());
+    })
+}
+
+// verify that cancelling a non-existent task returns an error
+#[test]
+fn cancel_task_must_exist() {
+    new_test_ext(START_BLOCK_TIME).execute_with(|| {
+        //let task_id = BlakeTwo256::hash_of(&task);
+        let task_id = vec![49, 45, 48, 45, 52];
+
+        assert_noop!(
+            AutomationTime::cancel_task(RuntimeOrigin::signed(AccountId32::new(ALICE)), task_id),
+            Error::<Test>::TaskDoesNotExist,
+        );
+    })
+}
+
+// verify that if an account has a task id in its AccountTasks storage but the
+// actual task doesn't exist in any scheduled slot or in the task queue, the
+// cancel still succeeds in removing the task id from AccountTasks storage,
+// while emitting an extra TaskNotFound event beside the normal TaskCancelled
+// event
+#[test]
+fn cancel_task_not_found() {
+    new_test_ext(START_BLOCK_TIME).execute_with(|| {
+        let owner = AccountId32::new(ALICE);
+        let task = TaskOf::<Test>::create_event_task::<Test>(
+            owner.clone(),
+            vec![40],
+            vec![SCHEDULED_TIME],
+            vec![2, 4, 5],
+            vec![],
+        )
+        .unwrap();
+        let task_id = vec![49, 45, 48, 45, 49];
+        AccountTasks::<Test>::insert(owner.clone(), task_id.clone(), task);
+
+        assert_ok!(AutomationTime::cancel_task(
+            RuntimeOrigin::signed(owner.clone()),
+            task_id.clone(),
+        ));
+        assert_eq!(
+            events(),
+            [
+                RuntimeEvent::AutomationTime(crate::Event::TaskNotFound {
+                    who: owner.clone(),
+                    task_id: task_id.clone(),
+                }),
+                RuntimeEvent::AutomationTime(crate::Event::TaskCancelled {
+                    who: owner,
+                    task_id: task_id.clone()
+                })
+            ]
+        );
+
+        // now ensure the task id is also removed from AccountTasks
+        assert_noop!(
+            AutomationTime::cancel_task(RuntimeOrigin::signed(AccountId32::new(ALICE)), task_id),
+            Error::<Test>::TaskDoesNotExist,
+        );
+    })
+}
+
+// verify only the owner of the task can cancel it
+#[test]
+fn cancel_task_fail_non_owner() {
+    new_test_ext(START_BLOCK_TIME).execute_with(|| {
+        let owner = AccountId32::new(ALICE);
+        let task_id1 = schedule_task(
+            ALICE,
+            vec![
+                SCHEDULED_TIME,
+                SCHEDULED_TIME + SLOT_SIZE_SECONDS,
+                SCHEDULED_TIME + SLOT_SIZE_SECONDS * 2,
+            ],
+            vec![2, 4, 5],
); + LastTimeSlot::::put(( + SCHEDULED_TIME - SLOT_SIZE_SECONDS * 4, + SCHEDULED_TIME - SLOT_SIZE_SECONDS * 4, + )); + + System::reset_events(); + + // BOB cannot cancel because he isn't the task owner + assert_noop!( + AutomationTime::cancel_task( + RuntimeOrigin::signed(AccountId32::new(BOB)), + task_id1.clone() + ), + Error::::TaskDoesNotExist, + ); + + // But Alice can cancel as expected + assert_ok!(AutomationTime::cancel_task( + RuntimeOrigin::signed(owner), + task_id1, + )); + }) +} + +// verifying that root/sudo can force_cancel anybody's tasks +#[test] +fn force_cancel_task_works() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let task_id = schedule_task(ALICE, vec![SCHEDULED_TIME], vec![2, 4, 5]); + LastTimeSlot::::put(( + SCHEDULED_TIME - SLOT_SIZE_SECONDS * 4, + SCHEDULED_TIME - SLOT_SIZE_SECONDS * 4, + )); + System::reset_events(); + + assert_ok!(AutomationTime::force_cancel_task( + RawOrigin::Root.into(), + AccountId32::new(ALICE), + task_id.clone() + )); + assert_eq!( + events(), + [RuntimeEvent::AutomationTime(crate::Event::TaskCancelled { + who: AccountId32::new(ALICE), + task_id + }),] + ); + }) +} + +mod extrinsics { + use super::*; + + mod schedule_dynamic_dispatch_task { + use super::*; + + #[test] + fn works() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let account_id = AccountId32::new(ALICE); + let execution_times = [SCHEDULED_TIME]; + let call: RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + assert_ok!(fund_account_dynamic_dispatch( + &account_id, + execution_times.len(), + call.encode() + )); + + assert_ok!(AutomationTime::schedule_dynamic_dispatch_task( + RuntimeOrigin::signed(account_id.clone()), + ScheduleParam::Fixed { + execution_times: vec![SCHEDULED_TIME] + }, + Box::new(call.clone()) + )); + assert_eq!( + last_event(), + RuntimeEvent::AutomationTime(crate::Event::TaskScheduled { + who: account_id, + task_id: FIRST_TASK_ID.to_vec(), + schedule_as: None, + encoded_call: Some(call.encode()), + }) + ); + }) + } + } +} + +mod run_dynamic_dispatch_action { + use super::*; + + #[test] + fn cannot_decode() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let account_id = AccountId32::new(ALICE); + let bad_encoded_call: Vec = vec![1]; + + let (weight, error) = + AutomationTime::run_dynamic_dispatch_action(account_id, bad_encoded_call); + + assert_eq!( + weight, + ::WeightInfo::run_dynamic_dispatch_action_fail_decode() + ); + + assert_eq!( + error, + Some(DispatchError::from(Error::::CallCannotBeDecoded)) + ); + }) + } + + #[test] + fn call_errors() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let account_id = AccountId32::new(ALICE); + let call: RuntimeCall = frame_system::Call::set_code { code: vec![] }.into(); + let encoded_call = call.encode(); + + let (_, error) = AutomationTime::run_dynamic_dispatch_action(account_id, encoded_call); + + assert_eq!(error, Some(DispatchError::BadOrigin)); + }) + } + + #[test] + fn call_filtered() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let account_id = AccountId32::new(ALICE); + let call: RuntimeCall = pallet_timestamp::Call::set { now: 100 }.into(); + let encoded_call = call.encode(); + + let (_, error) = AutomationTime::run_dynamic_dispatch_action(account_id, encoded_call); + + assert_eq!( + error, + Some(DispatchError::from( + frame_system::Error::::CallFiltered + )) + ); + }) + } + + #[test] + fn call_works() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let account_id = AccountId32::new(ALICE); + let call: RuntimeCall = 
            frame_system::Call::remark { remark: vec![] }.into();
+            let encoded_call = call.encode();
+
+            let (_, error) = AutomationTime::run_dynamic_dispatch_action(account_id, encoded_call);
+
+            assert_eq!(error, None);
+        })
+    }
+}
+
+// Weights to use for tests below
+// 10_000v: run per missed task (run_missed_tasks_many_found)
+// 10_000v: run per task not found in map (run_missed_tasks_many_missing, run_tasks_many_missing)
+// 50_000v: weight check for running 1 more task, current static v=1 (run_tasks_many_found)
+// 10_000: update task queue function overhead (update_task_queue_overhead)
+// 20_000: update task queue for scheduled tasks (update_scheduled_task_queue)
+// 20_000v: for each old time slot to missed tasks (append_to_missed_tasks)
+// 20_000: to move a single time slot to missed tasks (shift_missed_tasks)
+
+// ensure that the first task trigger on the chain's first block runs properly
+// without error and does not emit any event
+#[test]
+fn trigger_tasks_handles_first_run() {
+    new_test_ext(0).execute_with(|| {
+        AutomationTime::trigger_tasks(Weight::from_parts(30_000, 0));
+
+        assert_eq!(events(), vec![],);
+    })
+}
+
+// verify that with no tasks, the trigger runs to completion without error and
+// emits no events
+#[test]
+fn trigger_tasks_nothing_to_do() {
+    new_test_ext(START_BLOCK_TIME).execute_with(|| {
+        LastTimeSlot::<Test>::put((LAST_BLOCK_TIME, LAST_BLOCK_TIME));
+
+        AutomationTime::trigger_tasks(Weight::from_parts(30_000, 0));
+
+        assert_eq!(events(), vec![],);
+    })
+}
+
+// when calling trigger_tasks, verify that the tasks scheduled for the current
+// slot are properly moved into the task queue, while missed tasks are moved
+// into the missed queue
+#[test]
+fn trigger_tasks_updates_queues() {
+    new_test_ext(START_BLOCK_TIME).execute_with(|| {
+        let missed_task_id = add_task_to_task_queue(
+            ALICE,
+            vec![40],
+            vec![SCHEDULED_TIME - SLOT_SIZE_SECONDS],
+            create_dynamic_dispatch_remark_action(vec![40]),
+            vec![],
+        );
+        let missed_task = MissedTaskV2Of::<Test>::new(
+            AccountId32::new(ALICE),
+            missed_task_id,
+            SCHEDULED_TIME - SLOT_SIZE_SECONDS,
+        );
+        assert_eq!(AutomationTime::get_missed_queue().len(), 0);
+        let scheduled_task_id = schedule_task(ALICE, vec![SCHEDULED_TIME], vec![50]);
+        Timestamp::set_timestamp(SCHEDULED_TIME * 1_000);
+        LastTimeSlot::<Test>::put((
+            SCHEDULED_TIME - SLOT_SIZE_SECONDS,
+            SCHEDULED_TIME - SLOT_SIZE_SECONDS,
+        ));
+        System::reset_events();
+
+        AutomationTime::trigger_tasks(Weight::from_parts(50_000, 0));
+
+        assert_eq!(AutomationTime::get_missed_queue().len(), 1);
+        assert_eq!(AutomationTime::get_missed_queue()[0], missed_task);
+        assert_eq!(AutomationTime::get_task_queue().len(), 1);
+        assert_eq!(AutomationTime::get_task_queue()[0].1, scheduled_task_id);
+        assert_eq!(AutomationTime::get_scheduled_tasks(SCHEDULED_TIME), None);
+        assert_eq!(events(), vec![],);
+    })
+}
+
+// Verify that tasks scheduled in a past slot are moved into the missed queue,
+// while tasks in the current time slot are processed as much as possible, up
+// to the max weight.
+// In this test, we purposely set the weight so it won't process the missed
+// tasks, just make sure the missed slots' tasks are moved into the missed queue
+#[test]
+fn trigger_tasks_handles_missed_slots() {
+    new_test_ext(START_BLOCK_TIME).execute_with(|| {
+        let owner = AccountId32::new(ALICE);
+        let call: <Test as frame_system::Config>::RuntimeCall =
+            frame_system::Call::remark_with_event { remark: vec![40] }.into();
+
+        add_task_to_task_queue(
+            ALICE,
+            vec![40],
+            vec![SCHEDULED_TIME],
+            Action::DynamicDispatch {
+                encoded_call: call.encode(),
+            },
+            vec![],
+        );
+
+        assert_eq!(AutomationTime::get_missed_queue().len(), 0);
+
+        let missed_task_id =
+            schedule_task(ALICE, vec![SCHEDULED_TIME - SLOT_SIZE_SECONDS], vec![50]);
+        let missed_task = MissedTaskV2Of::<Test>::new(
+            AccountId32::new(ALICE),
+            missed_task_id,
+            SCHEDULED_TIME - SLOT_SIZE_SECONDS,
+        );
+
+        let remark_message = vec![50];
+        let call: <Test as frame_system::Config>::RuntimeCall =
+            frame_system::Call::remark_with_event {
+                remark: remark_message.clone(),
+            }
+            .into();
+        let task_will_be_run_id = schedule_dynamic_dispatch_task(ALICE, vec![SCHEDULED_TIME], call);
+        let scheduled_task_id = schedule_task(ALICE, vec![SCHEDULED_TIME], vec![50]);
+
+        Timestamp::set_timestamp(SCHEDULED_TIME * 1_000);
+        LastTimeSlot::<Test>::put((
+            SCHEDULED_TIME - SLOT_SIZE_SECONDS * 2,
+            SCHEDULED_TIME - SLOT_SIZE_SECONDS * 2,
+        ));
+        System::reset_events();
+
+        // Give this enough weight to run the current slot's task and to shift
+        // the missed slots' tasks into the missed queue
+        AutomationTime::trigger_tasks(Weight::from_parts(900_000 * 2 + 40_000, 0));
+
+        // the first 2 tasks are missed
+        assert_eq!(AutomationTime::get_missed_queue().len(), 2);
+        assert_eq!(AutomationTime::get_missed_queue()[1], missed_task);
+
+        // the final one, in the current schedule, is moved into the task queue
+        assert_eq!(AutomationTime::get_task_queue().len(), 1);
+        assert_eq!(AutomationTime::get_task_queue()[0].1, scheduled_task_id);
+
+        let mut condition: BTreeMap<Vec<u8>, Vec<u8>> = BTreeMap::new();
+        condition.insert("type".as_bytes().to_vec(), "time".as_bytes().to_vec());
+        condition.insert(
+            "timestamp".as_bytes().to_vec(),
+            SCHEDULED_TIME.to_string().into_bytes(),
+        );
+
+        assert_eq!(
+            events(),
+            vec![
+                RuntimeEvent::AutomationTime(crate::Event::TaskTriggered {
+                    who: owner.clone(),
+                    task_id: task_will_be_run_id.clone(),
+                    condition,
+                }),
+                RuntimeEvent::System(frame_system::pallet::Event::Remarked {
+                    sender: AccountId32::new(ALICE),
+                    hash: BlakeTwo256::hash(&remark_message),
+                }),
+                RuntimeEvent::AutomationTime(crate::Event::TaskExecuted {
+                    who: owner,
+                    task_id: task_will_be_run_id.clone(),
+                }),
+                RuntimeEvent::AutomationTime(crate::Event::TaskCompleted {
+                    who: AccountId32::new(ALICE),
+                    task_id: task_will_be_run_id,
+                }),
+            ],
+        );
+    })
+}
+
+// Verify the logic of handling missed tasks as below:
+// - tasks in the current slot always get processed first,
+// - past time slots are retained and will eventually be moved into MissedQueueV2;
+//   from there, we generate a TaskMissed event, then the task is completely
+//   removed from the queue,
+// - existing tasks in the queue (from a previous run) will also be moved to
+//   MissedQueueV2 and yield a TaskMissed event,
+// - we don't backfill or run old tasks.
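+//
+// For illustration (assuming hour-long slots): if the last processed slot was
+// T-3h and the chain is now at slot T, the tasks scheduled for T run first,
+// while the tasks from T-2h and T-1h are only shifted into MissedQueueV2 and
+// later surface as TaskMissed events; they are never executed.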
+// +// The execution of task missed event generation is lower priority, tasks in the +// time slot got run first, if there is enough weight left, only then we run +// the task miss event and doing house cleanup +#[test] +fn trigger_tasks_limits_missed_slots() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let call: ::RuntimeCall = + frame_system::Call::remark_with_event { remark: vec![50] }.into(); + + let missing_task_id0 = add_task_to_task_queue( + ALICE, + vec![40], + vec![SCHEDULED_TIME], + Action::DynamicDispatch { + encoded_call: call.encode(), + }, + vec![], + ); + + assert_eq!(AutomationTime::get_missed_queue().len(), 0); + + Timestamp::set_timestamp((SCHEDULED_TIME - 25200) * 1_000); + let missing_task_id1 = + schedule_task(ALICE, vec![SCHEDULED_TIME - SLOT_SIZE_SECONDS], vec![50]); + + let missing_task_id2 = schedule_task( + ALICE, + vec![SCHEDULED_TIME - SLOT_SIZE_SECONDS * 2], + vec![50], + ); + let missing_task_id3 = schedule_task( + ALICE, + vec![SCHEDULED_TIME - SLOT_SIZE_SECONDS * 3], + vec![50], + ); + let missing_task_id4 = schedule_task( + ALICE, + vec![SCHEDULED_TIME - SLOT_SIZE_SECONDS * 4], + vec![50], + ); + let missing_task_id5 = schedule_task( + ALICE, + vec![SCHEDULED_TIME - SLOT_SIZE_SECONDS * 5], + vec![50], + ); + + let remark_message = vec![50]; + let call: ::RuntimeCall = + frame_system::Call::remark_with_event { + remark: remark_message.clone(), + } + .into(); + let task_id = schedule_dynamic_dispatch_task(ALICE, vec![SCHEDULED_TIME], call); + + Timestamp::set_timestamp(SCHEDULED_TIME * 1_000); + LastTimeSlot::::put(( + SCHEDULED_TIME - SLOT_SIZE_SECONDS * 7, + SCHEDULED_TIME - SLOT_SIZE_SECONDS * 7, + )); + System::reset_events(); + + AutomationTime::trigger_tasks(Weight::from_parts(9_769_423 + 200_000, 0)); + + let my_events = events(); + + let owner = AccountId32::new(ALICE); + + if let Some((updated_last_time_slot, updated_last_missed_slot)) = + AutomationTime::get_last_slot() + { + assert_eq!(updated_last_time_slot, SCHEDULED_TIME); + + // This line ensures when given a total weight of 9_769_423 + 200_000, missing_task_id5, missing_task_id4, missing_task_id3 and missing_task_id2 will be discarded from the missed_queue in the current version of code. 
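+// The slot at SCHEDULED_TIME - SLOT_SIZE_SECONDS was not reached within that
+// budget, so missing_task_id1 is still scheduled there, as asserted below.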
+ // TODO: we should examine the tasks in missed_queue instead of examing the timestamp of missing_task_id2 + assert_eq!( + updated_last_missed_slot, + SCHEDULED_TIME - SLOT_SIZE_SECONDS * 2 + ); + + let mut condition: BTreeMap, Vec> = BTreeMap::new(); + condition.insert("type".as_bytes().to_vec(), "time".as_bytes().to_vec()); + condition.insert( + "timestamp".as_bytes().to_vec(), + SCHEDULED_TIME.to_string().into_bytes(), + ); + + assert_eq!( + my_events, + [ + // The execution of encoded call task + RuntimeEvent::AutomationTime(crate::Event::TaskTriggered { + who: owner.clone(), + task_id: task_id.clone(), + condition: condition.clone(), + }), + RuntimeEvent::System(frame_system::pallet::Event::Remarked { + sender: owner.clone(), + hash: BlakeTwo256::hash(&remark_message), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskExecuted { + who: owner.clone(), + task_id: task_id.clone(), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { + who: owner.clone(), + task_id, + }), + // The task 0 missed + RuntimeEvent::AutomationTime(crate::Event::TaskMissed { + who: owner.clone(), + task_id: missing_task_id0.clone(), + execution_time: SCHEDULED_TIME - SLOT_SIZE_SECONDS * 7, + }), + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { + who: owner.clone(), + task_id: missing_task_id0, + }), + // The task 5 missed + RuntimeEvent::AutomationTime(crate::Event::TaskMissed { + who: owner.clone(), + task_id: missing_task_id5.clone(), + execution_time: SCHEDULED_TIME - SLOT_SIZE_SECONDS * 5, + }), + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { + who: owner.clone(), + task_id: missing_task_id5, + }), + // The task 4 missed + RuntimeEvent::AutomationTime(crate::Event::TaskMissed { + who: owner.clone(), + task_id: missing_task_id4.clone(), + execution_time: SCHEDULED_TIME - SLOT_SIZE_SECONDS * 4, + }), + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { + who: owner.clone(), + task_id: missing_task_id4, + }), + // The task 3 missed + RuntimeEvent::AutomationTime(crate::Event::TaskMissed { + who: owner.clone(), + task_id: missing_task_id3.clone(), + execution_time: SCHEDULED_TIME - SLOT_SIZE_SECONDS * 3, + }), + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { + who: owner.clone(), + task_id: missing_task_id3, + }), + // The task 2 missed + RuntimeEvent::AutomationTime(crate::Event::TaskMissed { + who: owner.clone(), + task_id: missing_task_id2.clone(), + execution_time: SCHEDULED_TIME - SLOT_SIZE_SECONDS * 2, + }), + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { + who: owner, + task_id: missing_task_id2, + }), + ] + ); + } else { + panic!("trigger_tasks_limits_missed_slots test did not have LastTimeSlot updated") + } + + match AutomationTime::get_scheduled_tasks(SCHEDULED_TIME - SLOT_SIZE_SECONDS) { + None => { + panic!("A task should be scheduled") + } + Some(ScheduledTasksOf:: { + tasks: account_task_ids, + .. 
+ }) => { + assert_eq!(account_task_ids.len(), 1); + assert_eq!(account_task_ids[0].1, missing_task_id1); + } + } + }) +} + +#[test] +fn trigger_tasks_completes_all_tasks() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let message_one: Vec = vec![2, 4, 5]; + let owner = AccountId32::new(ALICE); + let task_id1 = add_task_to_task_queue( + ALICE, + vec![40], + vec![SCHEDULED_TIME], + create_dynamic_dispatch_remark_action(message_one.clone()), + vec![], + ); + let message_two: Vec = vec![2, 4]; + let task_id2 = add_task_to_task_queue( + ALICE, + vec![50], + vec![SCHEDULED_TIME], + create_dynamic_dispatch_remark_action(message_two.clone()), + vec![], + ); + LastTimeSlot::::put((LAST_BLOCK_TIME, LAST_BLOCK_TIME)); + + AutomationTime::trigger_tasks(Weight::from_parts(20_000_000, 0)); + + let mut condition: BTreeMap, Vec> = BTreeMap::new(); + condition.insert("type".as_bytes().to_vec(), "time".as_bytes().to_vec()); + condition.insert( + "timestamp".as_bytes().to_vec(), + LAST_BLOCK_TIME.to_string().into_bytes(), + ); + + assert_eq!( + events(), + [ + RuntimeEvent::AutomationTime(crate::Event::TaskTriggered { + who: owner.clone(), + task_id: task_id1.clone(), + condition: condition.clone(), + }), + RuntimeEvent::System(frame_system::pallet::Event::Remarked { + sender: owner.clone(), + hash: BlakeTwo256::hash(&message_one), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskExecuted { + who: owner.clone(), + task_id: task_id1.clone(), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { + who: owner.clone(), + task_id: task_id1.clone(), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskTriggered { + who: owner.clone(), + task_id: task_id2.clone(), + condition, + }), + RuntimeEvent::System(frame_system::pallet::Event::Remarked { + sender: owner.clone(), + hash: BlakeTwo256::hash(&message_two), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskExecuted { + who: owner.clone(), + task_id: task_id2.clone(), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { + who: owner, + task_id: task_id2.clone(), + }), + ] + ); + assert_eq!(0, AutomationTime::get_task_queue().len()); + assert_eq!( + AutomationTime::get_account_task(AccountId32::new(ALICE), task_id1), + None + ); + assert_eq!( + AutomationTime::get_account_task(AccountId32::new(ALICE), task_id2), + None + ); + }) +} + +#[test] +fn trigger_tasks_handles_nonexisting_tasks() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let owner = AccountId32::new(ALICE); + let bad_task_id = vec![1, 2, 3]; + let mut task_queue = AutomationTime::get_task_queue(); + task_queue.push((owner.clone(), bad_task_id.clone())); + TaskQueueV2::::put(task_queue); + LastTimeSlot::::put((LAST_BLOCK_TIME, LAST_BLOCK_TIME)); + + AutomationTime::trigger_tasks(Weight::from_parts(90_000, 0)); + + assert_eq!( + events(), + [RuntimeEvent::AutomationTime(crate::Event::TaskNotFound { + who: owner, + task_id: bad_task_id + }),] + ); + assert_eq!(0, AutomationTime::get_task_queue().len()); + }) +} + +#[test] +fn trigger_tasks_completes_some_tasks() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let owner = AccountId32::new(ALICE); + let message_one: Vec = vec![2, 4, 5]; + let task_id1 = add_task_to_task_queue( + ALICE, + vec![40], + vec![SCHEDULED_TIME], + create_dynamic_dispatch_remark_action(message_one.clone()), + vec![], + ); + let message_two: Vec = vec![2, 4]; + let task_id2 = add_task_to_task_queue( + ALICE, + vec![50], + vec![SCHEDULED_TIME], + create_dynamic_dispatch_remark_action(message_two), + vec![], + ); + 
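+        // Only 80_000 ref-time is handed to trigger_tasks below: enough to
+        // dispatch the first task but not the second, which should stay queued.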
LastTimeSlot::::put((LAST_BLOCK_TIME, LAST_BLOCK_TIME)); + + AutomationTime::trigger_tasks(Weight::from_parts(80_000, 0)); + + let mut condition: BTreeMap, Vec> = BTreeMap::new(); + condition.insert("type".as_bytes().to_vec(), "time".as_bytes().to_vec()); + condition.insert( + "timestamp".as_bytes().to_vec(), + LAST_BLOCK_TIME.to_string().into_bytes(), + ); + + assert_eq!( + events(), + [ + RuntimeEvent::AutomationTime(crate::Event::TaskTriggered { + who: owner.clone(), + task_id: task_id1.clone(), + condition, + }), + RuntimeEvent::System(frame_system::pallet::Event::Remarked { + sender: owner.clone(), + hash: BlakeTwo256::hash(&message_one), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskExecuted { + who: owner.clone(), + task_id: task_id1.clone(), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { + who: owner.clone(), + task_id: task_id1.clone(), + }), + ] + ); + + assert_eq!(1, AutomationTime::get_task_queue().len()); + assert_eq!( + AutomationTime::get_account_task(owner.clone(), task_id1), + None + ); + assert_ne!(AutomationTime::get_account_task(owner, task_id2), None); + }) +} + +#[test] +fn trigger_tasks_completes_all_missed_tasks() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let task_id1 = add_task_to_missed_queue( + ALICE, + vec![40], + vec![SCHEDULED_TIME], + create_dynamic_dispatch_remark_action(vec![40]), + vec![], + ); + let task_id2 = add_task_to_missed_queue( + ALICE, + vec![50], + vec![SCHEDULED_TIME], + create_dynamic_dispatch_remark_action(vec![40]), + vec![], + ); + LastTimeSlot::::put((LAST_BLOCK_TIME, LAST_BLOCK_TIME)); + + AutomationTime::trigger_tasks(Weight::from_parts(130_000, 0)); + + let owner = AccountId32::new(ALICE); + + assert_eq!( + events(), + [ + RuntimeEvent::AutomationTime(crate::Event::TaskMissed { + who: owner.clone(), + task_id: task_id1.clone(), + execution_time: SCHEDULED_TIME + }), + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { + who: owner.clone(), + task_id: task_id1.clone(), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskMissed { + who: owner.clone(), + task_id: task_id2.clone(), + execution_time: SCHEDULED_TIME + }), + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { + who: owner, + task_id: task_id2.clone(), + }), + ] + ); + + assert_eq!(AutomationTime::get_missed_queue().len(), 0); + assert_eq!( + AutomationTime::get_account_task(AccountId32::new(ALICE), task_id1), + None + ); + assert_eq!( + AutomationTime::get_account_task(AccountId32::new(ALICE), task_id2), + None + ); + }) +} + +#[test] +fn missed_tasks_updates_executions_left() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let owner = AccountId32::new(ALICE); + let task_id1 = add_task_to_missed_queue( + ALICE, + vec![40], + vec![SCHEDULED_TIME, SCHEDULED_TIME + SLOT_SIZE_SECONDS], + create_dynamic_dispatch_remark_action(vec![40]), + vec![], + ); + let task_id2 = add_task_to_missed_queue( + ALICE, + vec![50], + vec![SCHEDULED_TIME, SCHEDULED_TIME + SLOT_SIZE_SECONDS], + create_dynamic_dispatch_remark_action(vec![40]), + vec![], + ); + + match AutomationTime::get_account_task(owner.clone(), task_id1.clone()) { + None => { + panic!("A task should exist if it was scheduled") + } + Some(task) => { + assert_eq!(task.schedule.known_executions_left(), 2); + } + } + match AutomationTime::get_account_task(owner.clone(), task_id2.clone()) { + None => { + panic!("A task should exist if it was scheduled") + } + Some(task) => { + assert_eq!(task.schedule.known_executions_left(), 2); + } + } + + 
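+        // Draining the missed queue only emits TaskMissed events without
+        // dispatching the calls; each task should drop from 2 known executions
+        // left to 1.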
LastTimeSlot::::put((LAST_BLOCK_TIME, LAST_BLOCK_TIME)); + AutomationTime::trigger_tasks(Weight::from_parts(130_000, 0)); + + assert_eq!( + events(), + [ + RuntimeEvent::AutomationTime(crate::Event::TaskMissed { + who: AccountId32::new(ALICE), + task_id: task_id1.clone(), + execution_time: SCHEDULED_TIME + }), + RuntimeEvent::AutomationTime(crate::Event::TaskMissed { + who: AccountId32::new(ALICE), + task_id: task_id2.clone(), + execution_time: SCHEDULED_TIME + }), + ] + ); + + assert_eq!(AutomationTime::get_missed_queue().len(), 0); + match AutomationTime::get_account_task(owner.clone(), task_id1) { + None => { + panic!("A task should exist if it was scheduled") + } + Some(task) => { + assert_eq!(task.schedule.known_executions_left(), 1); + } + } + match AutomationTime::get_account_task(owner, task_id2) { + None => { + panic!("A task should exist if it was scheduled") + } + Some(task) => { + assert_eq!(task.schedule.known_executions_left(), 1); + } + } + }) +} + +#[test] +fn missed_tasks_removes_completed_tasks() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let message_one: Vec = vec![2, 5, 7]; + let owner = AccountId32::new(ALICE); + let task_id01 = add_task_to_missed_queue( + ALICE, + vec![40], + vec![SCHEDULED_TIME, SCHEDULED_TIME - SLOT_SIZE_SECONDS], + create_dynamic_dispatch_remark_action(message_one.clone()), + vec![], + ); + + let mut task_queue = AutomationTime::get_task_queue(); + task_queue.push((owner.clone(), task_id01.clone())); + TaskQueueV2::::put(task_queue); + + assert_eq!(AutomationTime::get_missed_queue().len(), 1); + assert_eq!(AutomationTime::get_task_queue().len(), 1); + match AutomationTime::get_account_task(owner.clone(), task_id01.clone()) { + None => { + panic!("A task should exist if it was scheduled") + } + Some(task) => { + assert_eq!(task.schedule.known_executions_left(), 2); + } + } + + LastTimeSlot::::put((LAST_BLOCK_TIME, LAST_BLOCK_TIME)); + System::reset_events(); + AutomationTime::trigger_tasks(Weight::from_parts(20_000_000, 0)); + + assert_eq!(AutomationTime::get_task_queue().len(), 0); + assert_eq!(AutomationTime::get_missed_queue().len(), 0); + + let mut condition: BTreeMap, Vec> = BTreeMap::new(); + condition.insert("type".as_bytes().to_vec(), "time".as_bytes().to_vec()); + condition.insert( + "timestamp".as_bytes().to_vec(), + LAST_BLOCK_TIME.to_string().into_bytes(), + ); + + assert_eq!( + events(), + [ + RuntimeEvent::AutomationTime(crate::Event::TaskTriggered { + who: owner.clone(), + task_id: task_id01.clone(), + condition: condition.clone(), + }), + RuntimeEvent::System(frame_system::pallet::Event::Remarked { + sender: owner.clone(), + hash: BlakeTwo256::hash(&message_one), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskExecuted { + who: owner.clone(), + task_id: task_id01.clone(), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskMissed { + who: owner.clone(), + task_id: task_id01.clone(), + execution_time: SCHEDULED_TIME + }), + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { + who: owner.clone(), + task_id: task_id01.clone(), + }), + ] + ); + assert_eq!(AutomationTime::get_account_task(owner, task_id01), None); + }) +} + +#[test] +fn trigger_tasks_completes_some_xcmp_tasks() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let destination = Location::new(1, X1([Parachain(PARA_ID)].into())); + let encoded_call = vec![3, 4, 5]; + let owner = AccountId32::new(ALICE); + let task_id = add_task_to_task_queue( + ALICE, + vec![40], + vec![SCHEDULED_TIME], + Action::XCMP { + destination, + schedule_fee: 
NATIVE_LOCATION, + execution_fee: Box::new(AssetPayment { + asset_location: Location::new(0, Here).into(), + amount: 10, + }), + encoded_call, + encoded_call_weight: Weight::from_parts(100_000, 0), + overall_weight: Weight::from_parts(200_000, 0), + schedule_as: None, + instruction_sequence: InstructionSequence::PayThroughSovereignAccount, + }, + vec![], + ); + + LastTimeSlot::::put((LAST_BLOCK_TIME, LAST_BLOCK_TIME)); + System::reset_events(); + + AutomationTime::trigger_tasks(Weight::from_parts(120_000, 0)); + + let mut condition: BTreeMap, Vec> = BTreeMap::new(); + condition.insert("type".as_bytes().to_vec(), "time".as_bytes().to_vec()); + condition.insert( + "timestamp".as_bytes().to_vec(), + LAST_BLOCK_TIME.to_string().into_bytes(), + ); + + assert_eq!( + events(), + [ + RuntimeEvent::AutomationTime(crate::Event::TaskTriggered { + who: owner.clone(), + task_id: task_id.clone(), + condition, + }), + RuntimeEvent::AutomationTime(crate::Event::TaskExecuted { + who: owner.clone(), + task_id: task_id.clone(), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { + who: owner, + task_id + }) + ] + ); + }) +} + +// verify that execution left of a Fixed scheduled task will be decreased by +// one upon a succesful run. +#[test] +fn trigger_tasks_updates_executions_left() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let message_one: Vec = vec![2, 5, 7]; + let owner = AccountId32::new(ALICE); + let task_id01 = add_task_to_task_queue( + ALICE, + vec![40], + vec![SCHEDULED_TIME, SCHEDULED_TIME + SLOT_SIZE_SECONDS], + create_dynamic_dispatch_remark_action(message_one.clone()), + vec![], + ); + + match AutomationTime::get_account_task(owner.clone(), task_id01.clone()) { + None => { + panic!("A task should exist if it was scheduled") + } + Some(task) => { + assert_eq!(task.schedule.known_executions_left(), 2); + } + } + + LastTimeSlot::::put((LAST_BLOCK_TIME, LAST_BLOCK_TIME)); + System::reset_events(); + + AutomationTime::trigger_tasks(Weight::from_parts(120_000, 0)); + + let mut condition: BTreeMap, Vec> = BTreeMap::new(); + condition.insert("type".as_bytes().to_vec(), "time".as_bytes().to_vec()); + condition.insert( + "timestamp".as_bytes().to_vec(), + LAST_BLOCK_TIME.to_string().into_bytes(), + ); + + assert_eq!( + events(), + [ + RuntimeEvent::AutomationTime(crate::Event::TaskTriggered { + who: owner.clone(), + task_id: task_id01.clone(), + condition + }), + RuntimeEvent::System(frame_system::pallet::Event::Remarked { + sender: owner.clone(), + hash: BlakeTwo256::hash(&message_one), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskExecuted { + who: owner.clone(), + task_id: task_id01.clone(), + }), + ] + ); + match AutomationTime::get_account_task(owner, task_id01) { + None => { + panic!("A task should exist if it was scheduled") + } + Some(task) => { + assert_eq!(task.schedule.known_executions_left(), 1); + } + } + }) +} + +#[test] +fn trigger_tasks_removes_completed_tasks() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let message_one: Vec = vec![2, 5, 7]; + let owner = AccountId32::new(ALICE); + let task_id01 = add_task_to_task_queue( + ALICE, + vec![40], + vec![SCHEDULED_TIME], + create_dynamic_dispatch_remark_action(message_one.clone()), + vec![], + ); + + match AutomationTime::get_account_task(owner.clone(), task_id01.clone()) { + None => { + panic!("A task should exist if it was scheduled") + } + Some(task) => { + assert_eq!(task.schedule.known_executions_left(), 1); + } + } + + LastTimeSlot::::put((LAST_BLOCK_TIME, LAST_BLOCK_TIME)); + 
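+        // This task has a single execution left, so one successful run should
+        // remove it from AccountTasks entirely, as asserted at the end.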
System::reset_events(); + + AutomationTime::trigger_tasks(Weight::from_parts(120_000, 0)); + + let mut condition: BTreeMap, Vec> = BTreeMap::new(); + condition.insert("type".as_bytes().to_vec(), "time".as_bytes().to_vec()); + condition.insert( + "timestamp".as_bytes().to_vec(), + LAST_BLOCK_TIME.to_string().into_bytes(), + ); + + assert_eq!( + events(), + [ + RuntimeEvent::AutomationTime(crate::Event::TaskTriggered { + who: owner.clone(), + task_id: task_id01.clone(), + condition, + }), + RuntimeEvent::System(frame_system::pallet::Event::Remarked { + sender: owner.clone(), + hash: BlakeTwo256::hash(&message_one), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskExecuted { + who: owner.clone(), + task_id: task_id01.clone(), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { + who: owner.clone(), + task_id: task_id01.clone(), + }), + ] + ); + assert_eq!(AutomationTime::get_account_task(owner, task_id01), None); + }) +} + +#[test] +fn on_init_runs_tasks() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let message_one: Vec = vec![2, 4, 5]; + let owner = AccountId32::new(ALICE); + let task_id1 = add_task_to_task_queue( + ALICE, + vec![40], + vec![SCHEDULED_TIME], + create_dynamic_dispatch_remark_action(message_one.clone()), + vec![], + ); + let message_two: Vec = vec![2, 4]; + let task_id2 = add_task_to_task_queue( + ALICE, + vec![50], + vec![SCHEDULED_TIME], + create_dynamic_dispatch_remark_action(message_two.clone()), + vec![], + ); + let task_id3 = add_task_to_task_queue( + ALICE, + vec![60], + vec![SCHEDULED_TIME], + create_dynamic_dispatch_remark_action(message_two.clone()), + vec![], + ); + let task_id4 = add_task_to_task_queue( + ALICE, + vec![70], + vec![SCHEDULED_TIME], + create_dynamic_dispatch_remark_action(message_two.clone()), + vec![], + ); + let task_id5 = add_task_to_task_queue( + ALICE, + vec![80], + vec![SCHEDULED_TIME], + create_dynamic_dispatch_remark_action(vec![50]), + vec![], + ); + LastTimeSlot::::put((LAST_BLOCK_TIME, LAST_BLOCK_TIME)); + + AutomationTime::on_initialize(1); + + let mut condition: BTreeMap, Vec> = BTreeMap::new(); + condition.insert("type".as_bytes().to_vec(), "time".as_bytes().to_vec()); + condition.insert( + "timestamp".as_bytes().to_vec(), + LAST_BLOCK_TIME.to_string().into_bytes(), + ); + + assert_eq!( + events(), + [ + // The execution of task 1 + RuntimeEvent::AutomationTime(crate::Event::TaskTriggered { + who: owner.clone(), + task_id: task_id1.clone(), + condition: condition.clone(), + }), + RuntimeEvent::System(frame_system::pallet::Event::Remarked { + sender: owner.clone(), + hash: BlakeTwo256::hash(&message_one), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskExecuted { + who: owner.clone(), + task_id: task_id1.clone(), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { + who: owner.clone(), + task_id: task_id1.clone(), + }), + // The execution of task 2 + RuntimeEvent::AutomationTime(crate::Event::TaskTriggered { + who: owner.clone(), + task_id: task_id2.clone(), + condition: condition.clone(), + }), + RuntimeEvent::System(frame_system::pallet::Event::Remarked { + sender: owner.clone(), + hash: BlakeTwo256::hash(&message_two), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskExecuted { + who: owner.clone(), + task_id: task_id2.clone(), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { + who: owner.clone(), + task_id: task_id2.clone(), + }), + // The execution of task 3 + RuntimeEvent::AutomationTime(crate::Event::TaskTriggered { + who: owner.clone(), + task_id: 
task_id3.clone(), + condition: condition.clone(), + }), + RuntimeEvent::System(frame_system::pallet::Event::Remarked { + sender: owner.clone(), + hash: BlakeTwo256::hash(&message_two), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskExecuted { + who: owner.clone(), + task_id: task_id3.clone(), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { + who: owner.clone(), + task_id: task_id3.clone(), + }), + // The execution of task 4 + RuntimeEvent::AutomationTime(crate::Event::TaskTriggered { + who: owner.clone(), + task_id: task_id4.clone(), + condition: condition.clone(), + }), + RuntimeEvent::System(frame_system::pallet::Event::Remarked { + sender: owner.clone(), + hash: BlakeTwo256::hash(&message_two), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskExecuted { + who: owner.clone(), + task_id: task_id4.clone(), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { + who: owner.clone(), + task_id: task_id4.clone(), + }), + ] + ); + assert_eq!( + AutomationTime::get_account_task(owner.clone(), task_id1), + None + ); + assert_eq!( + AutomationTime::get_account_task(owner.clone(), task_id2), + None + ); + assert_eq!( + AutomationTime::get_account_task(owner.clone(), task_id3), + None + ); + assert_eq!( + AutomationTime::get_account_task(owner.clone(), task_id4), + None + ); + assert_ne!( + AutomationTime::get_account_task(owner.clone(), task_id5.clone()), + None + ); + assert_eq!(AutomationTime::get_task_queue().len(), 1); + assert_eq!(AutomationTime::get_missed_queue().len(), 0); + + Timestamp::set_timestamp(START_BLOCK_TIME + (SLOT_SIZE_SECONDS * 1_000)); + AutomationTime::on_initialize(2); + assert_eq!( + events(), + [ + RuntimeEvent::AutomationTime(crate::Event::TaskMissed { + who: AccountId32::new(ALICE), + task_id: task_id5.clone(), + execution_time: LAST_BLOCK_TIME + }), + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { + who: owner.clone(), + task_id: task_id5.clone(), + }), + ], + ); + assert_eq!(AutomationTime::get_account_task(owner, task_id5), None); + assert_eq!(AutomationTime::get_task_queue().len(), 0); + assert_eq!(AutomationTime::get_missed_queue().len(), 0); + }) +} + +// When our blockchain boot and initialize, it will start trigger and run tasks up to +// a MaxWeightPercentage of the MaxBlockWeight +// +#[test] +fn on_init_check_task_queue() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + LastTimeSlot::::put((LAST_BLOCK_TIME, LAST_BLOCK_TIME - SLOT_SIZE_SECONDS * 2)); + let mut tasks = vec![]; + + for i in 0..9 { + let task_id = add_task_to_task_queue( + ALICE, + vec![i], + vec![SCHEDULED_TIME], + create_dynamic_dispatch_remark_action(vec![i]), + vec![], + ); + tasks.push(task_id.clone()); + } + Timestamp::set_timestamp(START_BLOCK_TIME + (10 * 1000)); + AutomationTime::on_initialize(1); + + let owner = AccountId32::new(ALICE); + + let mut condition: BTreeMap, Vec> = BTreeMap::new(); + condition.insert("type".as_bytes().to_vec(), "time".as_bytes().to_vec()); + condition.insert( + "timestamp".as_bytes().to_vec(), + LAST_BLOCK_TIME.to_string().into_bytes(), + ); + + assert_eq!( + events(), + [ + RuntimeEvent::AutomationTime(crate::Event::TaskTriggered { + who: owner.clone(), + task_id: tasks[0].clone(), + condition: condition.clone(), + }), + RuntimeEvent::System(frame_system::pallet::Event::Remarked { + sender: owner.clone(), + hash: BlakeTwo256::hash(vec![0].as_slice()), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskExecuted { + who: owner.clone(), + task_id: tasks[0].clone(), + }), + 
RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { + who: owner.clone(), + task_id: tasks[0].clone(), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskTriggered { + who: owner.clone(), + task_id: tasks[1].clone(), + condition: condition.clone(), + }), + RuntimeEvent::System(frame_system::pallet::Event::Remarked { + sender: owner.clone(), + hash: BlakeTwo256::hash(vec![1].as_slice()), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskExecuted { + who: owner.clone(), + task_id: tasks[1].clone(), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { + who: owner.clone(), + task_id: tasks[1].clone(), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskTriggered { + who: owner.clone(), + task_id: tasks[2].clone(), + condition: condition.clone(), + }), + RuntimeEvent::System(frame_system::pallet::Event::Remarked { + sender: owner.clone(), + hash: BlakeTwo256::hash(vec![2].as_slice()), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskExecuted { + who: owner.clone(), + task_id: tasks[2].clone(), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { + who: owner.clone(), + task_id: tasks[2].clone(), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskTriggered { + who: owner.clone(), + task_id: tasks[3].clone(), + condition: condition.clone(), + }), + RuntimeEvent::System(frame_system::pallet::Event::Remarked { + sender: owner.clone(), + hash: BlakeTwo256::hash(vec![3].as_slice()), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskExecuted { + who: owner.clone(), + task_id: tasks[3].clone(), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { + who: owner.clone(), + task_id: tasks[3].clone(), + }), + ], + ); + assert_eq!(AutomationTime::get_task_queue().len(), 5); + assert_eq!(AutomationTime::get_missed_queue().len(), 0); + + Timestamp::set_timestamp(START_BLOCK_TIME + (40 * 1000)); + AutomationTime::on_initialize(2); + assert_eq!( + events(), + [ + RuntimeEvent::AutomationTime(crate::Event::TaskTriggered { + who: owner.clone(), + task_id: tasks[4].clone(), + condition: condition.clone(), + }), + RuntimeEvent::System(frame_system::pallet::Event::Remarked { + sender: owner.clone(), + hash: BlakeTwo256::hash(vec![4].as_slice()), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskExecuted { + who: owner.clone(), + task_id: tasks[4].clone(), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { + who: owner.clone(), + task_id: tasks[4].clone(), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskTriggered { + who: owner.clone(), + task_id: tasks[5].clone(), + condition: condition.clone(), + }), + RuntimeEvent::System(frame_system::pallet::Event::Remarked { + sender: owner.clone(), + hash: BlakeTwo256::hash(vec![5].as_slice()), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskExecuted { + who: owner.clone(), + task_id: tasks[5].clone(), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { + who: owner.clone(), + task_id: tasks[5].clone(), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskTriggered { + who: owner.clone(), + task_id: tasks[6].clone(), + condition: condition.clone(), + }), + RuntimeEvent::System(frame_system::pallet::Event::Remarked { + sender: owner.clone(), + hash: BlakeTwo256::hash(vec![6].as_slice()), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskExecuted { + who: owner.clone(), + task_id: tasks[6].clone(), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { + who: owner.clone(), + task_id: tasks[6].clone(), + }), + 
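+                // tasks[7] is the last task that fits this block's weight
+                // budget; tasks[8] remains in the queue (asserted below)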
RuntimeEvent::AutomationTime(crate::Event::TaskTriggered { + who: owner.clone(), + task_id: tasks[7].clone(), + condition: condition.clone(), + }), + RuntimeEvent::System(frame_system::pallet::Event::Remarked { + sender: owner.clone(), + hash: BlakeTwo256::hash(vec![7].as_slice()), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskExecuted { + who: owner.clone(), + task_id: tasks[7].clone(), + }), + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { + who: owner.clone(), + task_id: tasks[7].clone(), + }), + ], + ); + assert_eq!(AutomationTime::get_task_queue().len(), 1); + assert_eq!(AutomationTime::get_missed_queue().len(), 0); + + Timestamp::set_timestamp(START_BLOCK_TIME + (SLOT_SIZE_SECONDS * 1000)); + AutomationTime::on_initialize(3); + assert_eq!( + events(), + [ + RuntimeEvent::AutomationTime(crate::Event::TaskMissed { + who: AccountId32::new(ALICE), + task_id: tasks[8].clone(), + execution_time: LAST_BLOCK_TIME + }), + RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { + who: owner, + task_id: tasks[8].clone(), + }), + ], + ); + assert_eq!(AutomationTime::get_task_queue().len(), 0); + assert_eq!(AutomationTime::get_missed_queue().len(), 0); + }) +} diff --git a/pallets/automation-time/src/types.rs b/pallets/automation-time/src/types.rs new file mode 100644 index 000000000..a857c5239 --- /dev/null +++ b/pallets/automation-time/src/types.rs @@ -0,0 +1,695 @@ +// This file is part of Ava Protocol. + +// Copyright (C) 2022 Ava Protocol +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{weights::WeightInfo, Config, Error, InstructionSequence, Pallet}; + +use frame_support::{dispatch::GetDispatchInfo, pallet_prelude::*, traits::Get}; + +use sp_runtime::traits::CheckedConversion; +use sp_std::prelude::*; + +// use pallet_automation_time_rpc_runtime_api::AutomationAction; + +use staging_xcm::{latest::prelude::*, VersionedLocation}; + +pub type Seconds = u64; +pub type UnixTime = u64; + +/// The struct that stores execution payment for a task. +#[derive(Debug, Encode, Eq, PartialEq, Decode, TypeInfo, Clone)] +pub struct AssetPayment { + pub asset_location: VersionedLocation, + pub amount: u128, +} + +/// The enum that stores all action specific data. +#[derive(Clone, Debug, Eq, PartialEq, Encode, Decode, TypeInfo)] +pub enum Action { + XCMP { + destination: Location, + schedule_fee: Location, + execution_fee: Box, + encoded_call: Vec, + encoded_call_weight: Weight, + overall_weight: Weight, + schedule_as: Option, + instruction_sequence: InstructionSequence, + }, + DynamicDispatch { + encoded_call: Vec, + }, +} + +impl Action { + pub fn execution_weight(&self) -> Result { + let weight = match self { + Action::XCMP { .. 
} => ::WeightInfo::run_xcmp_task(), + Action::DynamicDispatch { encoded_call } => { + let scheduled_call: ::RuntimeCall = + Decode::decode(&mut &**encoded_call) + .map_err(|_| Error::::CallCannotBeDecoded)?; + ::WeightInfo::run_dynamic_dispatch_action() + .saturating_add(scheduled_call.get_dispatch_info().weight) + } + }; + Ok(weight.ref_time()) + } + + pub fn schedule_fee_location(&self) -> Location { + match self { + Action::XCMP { schedule_fee, .. } => schedule_fee.clone(), + _ => Location::default(), + } + } +} + +/// API Param for Scheduling +#[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)] +pub enum ScheduleParam { + Fixed { + execution_times: Vec, + }, + Recurring { + next_execution_time: UnixTime, + frequency: Seconds, + }, +} + +impl ScheduleParam { + /// Convert from ScheduleParam to Schedule + pub fn validated_into(self) -> Result { + match self { + Self::Fixed { + execution_times, .. + } => Schedule::new_fixed_schedule::(execution_times), + Self::Recurring { + next_execution_time, + frequency, + } => Schedule::new_recurring_schedule::(next_execution_time, frequency), + } + } + + /// Number of known executions at the time of scheduling the task + pub fn number_of_executions(&self) -> u32 { + match self { + Self::Fixed { execution_times } => { + execution_times.len().try_into().expect("bounded by u32") + } + Self::Recurring { .. } => 1, + } + } +} + +#[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)] +pub enum Schedule { + Fixed { + execution_times: Vec, + executions_left: u32, + }, + Recurring { + next_execution_time: UnixTime, + frequency: Seconds, + }, +} + +impl Schedule { + pub fn new_fixed_schedule( + mut execution_times: Vec, + ) -> Result { + Pallet::::clean_execution_times_vector(&mut execution_times); + let executions_left = execution_times.len() as u32; + let schedule = Self::Fixed { + execution_times, + executions_left, + }; + schedule.valid::()?; + Ok(schedule) + } + + pub fn new_recurring_schedule( + next_execution_time: UnixTime, + frequency: Seconds, + ) -> Result { + let schedule = Self::Recurring { + next_execution_time, + frequency, + }; + schedule.valid::()?; + Ok(schedule) + } + + pub fn known_executions_left(&self) -> u32 { + match self { + Self::Fixed { + executions_left, .. + } => *executions_left, + Self::Recurring { .. } => 1, + } + } + + fn valid(&self) -> DispatchResult { + match self { + Self::Fixed { + execution_times, .. + } => { + let number_of_executions: u32 = execution_times + .len() + .checked_into() + .ok_or(Error::::TooManyExecutionsTimes)?; + if number_of_executions == 0 { + Err(Error::::InvalidTime)?; + } + if number_of_executions > T::MaxExecutionTimes::get() { + Err(Error::::TooManyExecutionsTimes)?; + } + for time in execution_times.iter() { + Pallet::::is_valid_time(*time)?; + } + } + Self::Recurring { + next_execution_time, + frequency, + } => { + Pallet::::is_valid_time(*next_execution_time)?; + // Validate frequency by ensuring that the next proposed execution is at a valid time + let next_recurrence = next_execution_time + .checked_add(*frequency) + .ok_or(Error::::TimeTooFarOut)?; + if *next_execution_time == next_recurrence { + Err(Error::::InvalidTime)?; + } + Pallet::::is_valid_time(next_recurrence)?; + } + } + Ok(()) + } +} + +/// The struct that stores all information needed for a task. 
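+///
+/// As the task-id tests illustrate, `task_id` holds the ASCII bytes of
+/// `{block_number}-{extrinsic_index}-{event_index}`. A minimal sketch of how
+/// such an id could be built (hypothetical helper, not part of this pallet):
+///
+/// ```ignore
+/// fn make_task_id(block: u64, extrinsic_idx: u32, event_idx: u32) -> Vec<u8> {
+///     format!("{}-{}-{}", block, extrinsic_idx, event_idx).into_bytes()
+/// }
+/// assert_eq!(make_task_id(1, 0, 3), b"1-0-3".to_vec());
+/// ```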
+#[derive(Debug, Encode, Decode, TypeInfo, Clone)] +#[scale_info(skip_type_params(MaxExecutionTimes))] +pub struct Task { + pub owner_id: AccountId, + pub task_id: Vec, + pub schedule: Schedule, + pub action: Action, + pub abort_errors: Vec>, +} + +impl PartialEq for Task { + fn eq(&self, other: &Self) -> bool { + self.owner_id == other.owner_id + && self.task_id == other.task_id + && self.action == other.action + && self.schedule == other.schedule + } +} + +impl Eq for Task {} + +impl Task { + pub fn new( + owner_id: AccountId, + task_id: Vec, + schedule: Schedule, + action: Action, + abort_errors: Vec>, + ) -> Self { + Self { + owner_id, + task_id, + schedule, + action, + abort_errors, + } + } + + pub fn create_event_task( + owner_id: AccountId, + task_id: Vec, + execution_times: Vec, + message: Vec, + abort_errors: Vec>, + ) -> Result { + let call: ::RuntimeCall = + frame_system::Call::remark_with_event { remark: message }.into(); + let action = Action::DynamicDispatch { + encoded_call: call.encode(), + }; + let schedule = Schedule::new_fixed_schedule::(execution_times)?; + Ok(Self::new(owner_id, task_id, schedule, action, abort_errors)) + } + + pub fn create_xcmp_task( + owner_id: AccountId, + task_id: Vec, + execution_times: Vec, + destination: Location, + schedule_fee: Location, + execution_fee: AssetPayment, + encoded_call: Vec, + encoded_call_weight: Weight, + overall_weight: Weight, + schedule_as: Option, + instruction_sequence: InstructionSequence, + abort_errors: Vec>, + ) -> Result { + let action = Action::XCMP { + destination, + schedule_fee, + execution_fee: Box::new(execution_fee), + encoded_call, + encoded_call_weight, + overall_weight, + schedule_as, + instruction_sequence, + }; + let schedule = Schedule::new_fixed_schedule::(execution_times)?; + Ok(Self::new(owner_id, task_id, schedule, action, abort_errors)) + } + + pub fn execution_times(&self) -> Vec { + match &self.schedule { + Schedule::Fixed { + execution_times, .. + } => execution_times.to_vec(), + Schedule::Recurring { + next_execution_time, + .. + } => { + vec![*next_execution_time] + } + } + } +} + +#[derive(Debug, Eq, PartialEq, Encode, Decode, TypeInfo)] +pub struct MissedTaskV2 { + pub owner_id: AccountId, + pub task_id: TaskId, + pub execution_time: UnixTime, +} + +impl MissedTaskV2 { + pub fn new(owner_id: AccountId, task_id: TaskId, execution_time: UnixTime) -> Self { + Self { + owner_id, + task_id, + execution_time, + } + } +} + +#[derive(Debug, Decode, Eq, Encode, PartialEq, TypeInfo)] +#[scale_info(skip_type_params(T))] +pub struct ScheduledTasks { + pub tasks: Vec<(AccountId, TaskId)>, + pub weight: u128, +} +impl Default for ScheduledTasks { + fn default() -> Self { + Self { + tasks: vec![], + weight: 0, + } + } +} +impl ScheduledTasks { + pub fn try_push( + &mut self, + task_id: TaskId, + task: &Task, + ) -> Result<&mut Self, DispatchError> + where + AccountId: Clone, + { + let action_weight = task.action.execution_weight::()?; + let weight = self + .weight + .checked_add(action_weight as u128) + .ok_or(Error::::TimeSlotFull)?; + // A hard limit on tasks/slot prevents unforseen performance consequences + // that could occur when scheduling a huge number of lightweight tasks. + // Also allows us to make reasonable assumptions for worst case benchmarks. + if self.tasks.len() as u32 >= T::MaxTasksPerSlot::get() + || weight > T::MaxWeightPerSlot::get() + { + Err(Error::::TimeSlotFull)? 
+ } + + self.weight = weight; + self.tasks.push((task.owner_id.clone(), task_id)); + Ok(self) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + mock::*, + tests::{SCHEDULED_TIME, SLOT_SIZE_SECONDS, START_BLOCK_TIME}, + }; + use frame_support::{assert_err, assert_ok}; + + mod scheduled_tasks { + use super::*; + use crate::{AccountTaskId, ScheduledTasksOf, TaskOf}; + use sp_runtime::AccountId32; + + #[test] + fn try_push_errors_when_slot_is_full_by_weight() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let task = TaskOf::::create_event_task::( + AccountId32::new(ALICE), + vec![0], + vec![SCHEDULED_TIME], + vec![0], + vec![], + ) + .unwrap(); + let task_id = vec![48, 45, 48, 45, 48]; + assert_err!( + ScheduledTasksOf:: { + tasks: vec![], + weight: MaxWeightPerSlot::get() + } + .try_push::(task_id, &task), + Error::::TimeSlotFull + ); + }) + } + + #[test] + fn try_push_errors_when_slot_is_full_by_task_count() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let alice = AccountId32::new(ALICE); + let id = (alice.clone(), vec![49, 45, 48, 45, 42]); + + let task = TaskOf::::create_event_task::( + alice, + vec![0], + vec![SCHEDULED_TIME], + vec![0], + vec![], + ) + .unwrap(); + let tasks = (0..MaxTasksPerSlot::get()).fold::>, _>( + vec![], + |mut tasks, _| { + tasks.push(id.clone()); + tasks + }, + ); + let task_id = vec![48, 45, 48, 45, 48]; + assert_err!( + ScheduledTasksOf:: { tasks, weight: 0 }.try_push::(task_id, &task), + Error::::TimeSlotFull + ); + }) + } + + // verify calling try_push to push the task into the schedule work when + // slot is not full, not reaching the max weight and max tasks per slot + // task will be pushed to the `tasks` field and the `weight` field is + // increased properly for the weight of the task. + // + // the total weight of schedule will be weight of the schedule_* itself + // plus any to be call extrinsics in case of dynamic dispatch + // + #[test] + fn try_push_works_when_slot_is_not_full() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let task = TaskOf::::create_event_task::( + AccountId32::new(ALICE), + vec![0], + vec![SCHEDULED_TIME], + vec![0], + vec![], + ) + .unwrap(); + // When we schedule a test on the first block, on the first extrinsics and no event + // at all this is the first task id we generate + // {block-num}-{extrinsics-idx}-{evt-idx} + let task_id = "0-1-0".as_bytes().to_vec(); + let mut scheduled_tasks = ScheduledTasksOf::::default(); + scheduled_tasks + .try_push::(task_id.clone(), &task) + .expect("slot is not full"); + + assert_eq!(scheduled_tasks.tasks, vec![(task.owner_id, task_id)]); + + // this is same call we mock in create_event_tasks + let call: ::RuntimeCall = + frame_system::Call::remark_with_event { remark: vec![0] }.into(); + // weight will be equal = weight of the dynamic dispatch + the call itself + assert_eq!( + scheduled_tasks.weight, + 20_000 + call.get_dispatch_info().weight.ref_time() as u128 + ); + }) + } + } + + mod schedule_param { + use super::*; + + #[test] + fn sets_executions_left() { + new_test_ext(START_BLOCK_TIME).execute_with(|| { + let t1 = SCHEDULED_TIME + SLOT_SIZE_SECONDS; + let t2 = SCHEDULED_TIME + SLOT_SIZE_SECONDS * 2; + let t3 = SCHEDULED_TIME + SLOT_SIZE_SECONDS * 3; + let s = ScheduleParam::Fixed { + execution_times: vec![t1, t2, t3], + } + .validated_into::() + .expect("valid"); + if let Schedule::Fixed { + executions_left, .. 
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::{
+        mock::*,
+        tests::{SCHEDULED_TIME, SLOT_SIZE_SECONDS, START_BLOCK_TIME},
+    };
+    use frame_support::{assert_err, assert_ok};
+
+    mod scheduled_tasks {
+        use super::*;
+        use crate::{AccountTaskId, ScheduledTasksOf, TaskOf};
+        use sp_runtime::AccountId32;
+
+        #[test]
+        fn try_push_errors_when_slot_is_full_by_weight() {
+            new_test_ext(START_BLOCK_TIME).execute_with(|| {
+                let task = TaskOf::<Test>::create_event_task::<Test>(
+                    AccountId32::new(ALICE),
+                    vec![0],
+                    vec![SCHEDULED_TIME],
+                    vec![0],
+                    vec![],
+                )
+                .unwrap();
+                let task_id = vec![48, 45, 48, 45, 48];
+                assert_err!(
+                    ScheduledTasksOf::<Test> {
+                        tasks: vec![],
+                        weight: MaxWeightPerSlot::get()
+                    }
+                    .try_push::<Test>(task_id, &task),
+                    Error::<Test>::TimeSlotFull
+                );
+            })
+        }
+
+        #[test]
+        fn try_push_errors_when_slot_is_full_by_task_count() {
+            new_test_ext(START_BLOCK_TIME).execute_with(|| {
+                let alice = AccountId32::new(ALICE);
+                let id = (alice.clone(), vec![49, 45, 48, 45, 42]);
+
+                let task = TaskOf::<Test>::create_event_task::<Test>(
+                    alice,
+                    vec![0],
+                    vec![SCHEDULED_TIME],
+                    vec![0],
+                    vec![],
+                )
+                .unwrap();
+                let tasks = (0..MaxTasksPerSlot::get()).fold::<Vec<AccountTaskId<Test>>, _>(
+                    vec![],
+                    |mut tasks, _| {
+                        tasks.push(id.clone());
+                        tasks
+                    },
+                );
+                let task_id = vec![48, 45, 48, 45, 48];
+                assert_err!(
+                    ScheduledTasksOf::<Test> { tasks, weight: 0 }.try_push::<Test>(task_id, &task),
+                    Error::<Test>::TimeSlotFull
+                );
+            })
+        }
+
+        // Verify that try_push pushes the task into the schedule when the slot
+        // is not full, i.e. neither the max weight nor the max tasks per slot
+        // is reached. The task is appended to the `tasks` field and the
+        // `weight` field is increased by the weight of the task.
+        //
+        // The total weight of the schedule is the weight of the schedule_*
+        // extrinsic itself plus, in the case of dynamic dispatch, the weight of
+        // the to-be-dispatched call.
+        #[test]
+        fn try_push_works_when_slot_is_not_full() {
+            new_test_ext(START_BLOCK_TIME).execute_with(|| {
+                let task = TaskOf::<Test>::create_event_task::<Test>(
+                    AccountId32::new(ALICE),
+                    vec![0],
+                    vec![SCHEDULED_TIME],
+                    vec![0],
+                    vec![],
+                )
+                .unwrap();
+                // When we schedule a task in the first block, in the first extrinsic and
+                // with no events at all, this is the first task id we generate:
+                // {block-num}-{extrinsics-idx}-{evt-idx}
+                let task_id = "0-1-0".as_bytes().to_vec();
+                let mut scheduled_tasks = ScheduledTasksOf::<Test>::default();
+                scheduled_tasks
+                    .try_push::<Test>(task_id.clone(), &task)
+                    .expect("slot is not full");
+
+                assert_eq!(scheduled_tasks.tasks, vec![(task.owner_id, task_id)]);
+
+                // This is the same call we mock in create_event_task
+                let call: <Test as frame_system::Config>::RuntimeCall =
+                    frame_system::Call::remark_with_event { remark: vec![0] }.into();
+                // The weight equals the weight of the dynamic dispatch plus the call itself
+                assert_eq!(
+                    scheduled_tasks.weight,
+                    20_000 + call.get_dispatch_info().weight.ref_time() as u128
+                );
+            })
+        }
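+        // Worked example of the id format noted above ({block-num}-{extrinsics-idx}-{evt-idx}):
+        // block 0, extrinsic 1, event 0 encodes as the ASCII bytes of "0-1-0",
+        // i.e. vec![48, 45, 49, 45, 48] -- which is also why the literal ids in
+        // the error-path tests above, vec![48, 45, 48, 45, 48], read as "0-0-0".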
+    }
+
+    mod schedule_param {
+        use super::*;
+
+        #[test]
+        fn sets_executions_left() {
+            new_test_ext(START_BLOCK_TIME).execute_with(|| {
+                let t1 = SCHEDULED_TIME + SLOT_SIZE_SECONDS;
+                let t2 = SCHEDULED_TIME + SLOT_SIZE_SECONDS * 2;
+                let t3 = SCHEDULED_TIME + SLOT_SIZE_SECONDS * 3;
+                let s = ScheduleParam::Fixed {
+                    execution_times: vec![t1, t2, t3],
+                }
+                .validated_into::<Test>()
+                .expect("valid");
+                if let Schedule::Fixed {
+                    executions_left, ..
+                } = s
+                {
+                    assert_eq!(executions_left, 3);
+                } else {
+                    panic!("Expected Schedule::Fixed");
+                }
+            })
+        }
+
+        #[test]
+        fn validates_fixed_schedule() {
+            new_test_ext(START_BLOCK_TIME).execute_with(|| {
+                let t1 = SCHEDULED_TIME + SLOT_SIZE_SECONDS / 2;
+                let s = ScheduleParam::Fixed {
+                    execution_times: vec![t1],
+                }
+                .validated_into::<Test>();
+                assert_err!(s, Error::<Test>::InvalidTime);
+            })
+        }
+
+        #[test]
+        fn validates_recurring_schedule() {
+            new_test_ext(START_BLOCK_TIME).execute_with(|| {
+                let s = ScheduleParam::Recurring {
+                    next_execution_time: SCHEDULED_TIME,
+                    frequency: SLOT_SIZE_SECONDS,
+                }
+                .validated_into::<Test>()
+                .expect("valid");
+                if let Schedule::Recurring {
+                    next_execution_time,
+                    ..
+                } = s
+                {
+                    assert_eq!(next_execution_time, SCHEDULED_TIME);
+                } else {
+                    panic!("Expected Schedule::Recurring");
+                }
+
+                let s = ScheduleParam::Recurring {
+                    next_execution_time: SCHEDULED_TIME,
+                    frequency: SLOT_SIZE_SECONDS + 1,
+                }
+                .validated_into::<Test>();
+                assert_err!(s, Error::<Test>::InvalidTime);
+            })
+        }
+
+        #[test]
+        fn counts_executions() {
+            new_test_ext(START_BLOCK_TIME).execute_with(|| {
+                let t1 = SCHEDULED_TIME + SLOT_SIZE_SECONDS;
+                let t2 = SCHEDULED_TIME + SLOT_SIZE_SECONDS * 2;
+                let t3 = SCHEDULED_TIME + SLOT_SIZE_SECONDS * 3;
+
+                let s = ScheduleParam::Fixed {
+                    execution_times: vec![t1, t2, t3],
+                };
+                assert_eq!(s.number_of_executions(), 3);
+
+                let s = ScheduleParam::Recurring {
+                    next_execution_time: SCHEDULED_TIME,
+                    frequency: SLOT_SIZE_SECONDS,
+                };
+                assert_eq!(s.number_of_executions(), 1);
+            })
+        }
+    }
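+    // Illustrative note on the recurring-schedule invariant exercised above
+    // (derived from the tests; the concrete number is a hypothetical mock
+    // value, not taken from this change): `frequency` must be a whole number
+    // of slots, so with SLOT_SIZE_SECONDS = 600 a frequency of 600 or 1200
+    // validates, while 601 or 900 fails with Error::<Test>::InvalidTime.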
+    mod schedule {
+        use super::*;
+
+        #[test]
+        fn new_fixed_schedule_sets_executions_left() {
+            new_test_ext(START_BLOCK_TIME).execute_with(|| {
+                let t1 = SCHEDULED_TIME + SLOT_SIZE_SECONDS;
+                let t2 = SCHEDULED_TIME + SLOT_SIZE_SECONDS * 2;
+                let t3 = SCHEDULED_TIME + SLOT_SIZE_SECONDS * 3;
+                let s = Schedule::new_fixed_schedule::<Test>(vec![t1, t2, t3]).unwrap();
+                if let Schedule::Fixed {
+                    executions_left, ..
+                } = s
+                {
+                    assert_eq!(executions_left, 3);
+                } else {
+                    panic!("Expected Schedule::Fixed");
+                }
+            })
+        }
+
+        #[test]
+        fn new_fixed_schedule_errors_with_too_many_executions() {
+            new_test_ext(0).execute_with(|| {
+                let s = Schedule::new_fixed_schedule::<Test>(
+                    (0u64..=MaxExecutionTimes::get() as u64).collect(),
+                );
+                assert_err!(s, Error::<Test>::TooManyExecutionsTimes);
+            })
+        }
+
+        #[test]
+        fn new_fixed_schedule_cleans_execution_times() {
+            new_test_ext(START_BLOCK_TIME).execute_with(|| {
+                let t1 = SCHEDULED_TIME + SLOT_SIZE_SECONDS;
+                let t2 = SCHEDULED_TIME + SLOT_SIZE_SECONDS * 2;
+                let t3 = SCHEDULED_TIME + SLOT_SIZE_SECONDS * 3;
+                let s = Schedule::new_fixed_schedule::<Test>(vec![t1, t3, t2, t3, t3]);
+                if let Schedule::Fixed {
+                    execution_times, ..
+                } = s.unwrap()
+                {
+                    assert_eq!(execution_times, vec![t1, t2, t3]);
+                } else {
+                    panic!("Expected Schedule::Fixed");
+                }
+            })
+        }
+
+        #[test]
+        fn checks_for_fixed_schedule_validity() {
+            new_test_ext(START_BLOCK_TIME).execute_with(|| {
+                assert_ok!(Schedule::new_fixed_schedule::<Test>(vec![
+                    SCHEDULED_TIME + SLOT_SIZE_SECONDS
+                ]));
+                // Execution time does not fall on a whole slot boundary
+                assert_err!(
+                    Schedule::new_fixed_schedule::<Test>(vec![
+                        SCHEDULED_TIME + SLOT_SIZE_SECONDS,
+                        SCHEDULED_TIME + SLOT_SIZE_SECONDS + SLOT_SIZE_SECONDS / 2
+                    ]),
+                    Error::<Test>::InvalidTime
+                );
+                // No execution times
+                assert_err!(
+                    Schedule::new_fixed_schedule::<Test>(vec![]),
+                    Error::<Test>::InvalidTime
+                );
+            })
+        }
+
+        #[test]
+        fn checks_for_recurring_schedule_validity() {
+            let start_time = 1_663_225_200;
+            new_test_ext(start_time * 1_000).execute_with(|| {
+                assert_ok!(Schedule::Recurring {
+                    next_execution_time: start_time + SLOT_SIZE_SECONDS,
+                    frequency: SLOT_SIZE_SECONDS
+                }
+                .valid::<Test>());
+                // Next execution time not at slot granularity
+                assert_err!(
+                    Schedule::Recurring {
+                        next_execution_time: start_time + SLOT_SIZE_SECONDS + SLOT_SIZE_SECONDS / 2,
+                        frequency: SLOT_SIZE_SECONDS
+                    }
+                    .valid::<Test>(),
+                    Error::<Test>::InvalidTime
+                );
+                // Frequency not at slot granularity
+                assert_err!(
+                    Schedule::Recurring {
+                        next_execution_time: start_time + SLOT_SIZE_SECONDS + SLOT_SIZE_SECONDS / 2,
+                        frequency: SLOT_SIZE_SECONDS + SLOT_SIZE_SECONDS / 2
+                    }
+                    .valid::<Test>(),
+                    Error::<Test>::InvalidTime
+                );
+                // Frequency of 0
+                assert_err!(
+                    Schedule::Recurring {
+                        next_execution_time: start_time + SLOT_SIZE_SECONDS,
+                        frequency: 0
+                    }
+                    .valid::<Test>(),
+                    Error::<Test>::InvalidTime
+                );
+                // Frequency too far out
+                assert_err!(
+                    Schedule::Recurring {
+                        next_execution_time: start_time + SLOT_SIZE_SECONDS,
+                        frequency: start_time + SLOT_SIZE_SECONDS
+                    }
+                    .valid::<Test>(),
+                    Error::<Test>::TimeTooFarOut
+                );
+            })
+        }
+
+        #[test]
+        fn number_of_known_executions_for_fixed() {
+            new_test_ext(0).execute_with(|| {
+                let s = Schedule::Fixed {
+                    execution_times: vec![],
+                    executions_left: 5,
+                };
+                assert_eq!(s.known_executions_left(), 5);
+            })
+        }
+
+        #[test]
+        fn number_of_known_executions_for_recurring() {
+            new_test_ext(0).execute_with(|| {
+                let s = Schedule::Recurring {
+                    next_execution_time: 0,
+                    frequency: 0,
+                };
+                assert_eq!(s.known_executions_left(), 1);
+            })
+        }
+    }
+}
diff --git a/pallets/automation-time/src/weights.rs b/pallets/automation-time/src/weights.rs
new file mode 100644
index 000000000..548e89f73
--- /dev/null
+++ b/pallets/automation-time/src/weights.rs
@@ -0,0 +1,823 @@
+// This file is part of Ava Protocol.
+
+// Copyright (C) Ava Protocol Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for pallet_automation_time
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2024-01-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `actions-runner-1`, CPU: `Intel(R) Xeon(R) E-2388G CPU @ 3.20GHz`
+//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("turing-dev"), DB CACHE: 1024
+
+// Executed Command:
+// target/release/container-chain-simple-node
+// benchmark
+// pallet
+// --header
+// ./.maintain/HEADER-GPL3
+// --chain
+// turing-dev
+// --execution
+// wasm
+// --wasm-execution
+// compiled
+// --pallet
+// pallet_automation_time
+// --extrinsic
+// *
+// --repeat
+// 20
+// --steps
+// 50
+// --output
+// ./automation_time-raw-weights.rs
+// --template
+// ./.maintain/frame-weight-template.hbs
+
+// Summary:
+//:schedule_xcmp_task_full 173_102_820,8799
+//:schedule_auto_compound_delegated_stake_task_full 115_133_000,10008
+//:schedule_dynamic_dispatch_task 80_333_154,6196
+//:schedule_dynamic_dispatch_task_full 96_845_515,6196
+//:cancel_scheduled_task_full 1_410_325_000,406458
+//:force_cancel_scheduled_task 29_022_000,3927
+//:force_cancel_scheduled_task_full 1_422_630_000,406458
+//:cancel_task_with_schedule_as_full 170_642_000,93492
+//:run_xcmp_task 38_529_000,3946
+//:run_auto_compound_delegated_stake_task 80_419_000,4929
+//:run_dynamic_dispatch_action 8_778_000,3598
+//:run_dynamic_dispatch_action_fail_decode 965_000,0
+//:run_missed_tasks_many_found 304_932,1871
+//:run_missed_tasks_many_missing 298_306,1787
+//:run_tasks_many_found 3_570_563,2001
+//:run_tasks_many_missing 2_821_983,1493
+//:update_task_queue_overhead 2_747_000,1493
+//:append_to_missed_tasks 3_350_768,13581
+//:update_scheduled_task_queue 45_812_000,12375
+//:shift_missed_tasks 6_175_000,3681
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use core::marker::PhantomData;
+
+/// Weight functions needed for pallet_automation_time.
+pub trait WeightInfo {
+	fn schedule_xcmp_task_full(v: u32, ) -> Weight;
+	fn schedule_auto_compound_delegated_stake_task_full() -> Weight;
+	fn schedule_dynamic_dispatch_task(v: u32, ) -> Weight;
+	fn schedule_dynamic_dispatch_task_full(v: u32, ) -> Weight;
+	fn cancel_scheduled_task_full() -> Weight;
+	fn force_cancel_scheduled_task() -> Weight;
+	fn force_cancel_scheduled_task_full() -> Weight;
+	fn cancel_task_with_schedule_as_full() -> Weight;
+	fn run_xcmp_task() -> Weight;
+	fn run_auto_compound_delegated_stake_task() -> Weight;
+	fn run_dynamic_dispatch_action() -> Weight;
+	fn run_dynamic_dispatch_action_fail_decode() -> Weight;
+	fn run_missed_tasks_many_found(v: u32, ) -> Weight;
+	fn run_missed_tasks_many_missing(v: u32, ) -> Weight;
+	fn run_tasks_many_found(v: u32, ) -> Weight;
+	fn run_tasks_many_missing(v: u32, ) -> Weight;
+	fn update_task_queue_overhead() -> Weight;
+	fn append_to_missed_tasks(v: u32, ) -> Weight;
+	fn update_scheduled_task_queue() -> Weight;
+	fn shift_missed_tasks() -> Weight;
+}
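+// A minimal wiring sketch (assumption: a runtime named `Runtime` that already
+// implements this pallet's Config trait, and a `WeightInfo` associated type on
+// that trait): production runtimes would point at the benchmarked
+// implementation below, while tests can fall back to the `()` implementation.
+//
+//     impl pallet_automation_time::Config for Runtime {
+//         // ...
+//         type WeightInfo = pallet_automation_time::weights::SubstrateWeight<Runtime>;
+//     }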
+/// Weights for pallet_automation_time using the Substrate node and recommended hardware.
+pub struct SubstrateWeight<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
+	/// Storage: ParachainInfo ParachainId (r:1 w:0)
+	/// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
+	/// Storage: Timestamp Now (r:1 w:0)
+	/// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen)
+	/// Storage: AssetRegistry LocationToAssetId (r:2 w:0)
+	/// Proof Skipped: AssetRegistry LocationToAssetId (max_values: None, max_size: None, mode: Measured)
+	/// Storage: AssetRegistry Metadata (r:1 w:0)
+	/// Proof Skipped: AssetRegistry Metadata (max_values: None, max_size: None, mode: Measured)
+	/// Storage: Tokens Accounts (r:2 w:2)
+	/// Proof: Tokens Accounts (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen)
+	/// Storage: System Account (r:3 w:1)
+	/// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
+	/// Storage: AutomationTime AccountTasks (r:1 w:1)
+	/// Proof Skipped: AutomationTime AccountTasks (max_values: None, max_size: None, mode: Measured)
+	/// Storage: AutomationTime ScheduledTasksV3 (r:36 w:36)
+	/// Proof Skipped: AutomationTime ScheduledTasksV3 (max_values: None, max_size: None, mode: Measured)
+	/// Storage: Tokens TotalIssuance (r:1 w:1)
+	/// Proof: Tokens TotalIssuance (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen)
+	/// The range of component `v` is `[1, 36]`.
+	fn schedule_xcmp_task_full(v: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `2072 + v * (8712 ±0)`
+		//  Estimated: `8799 + v * (11187 ±0)`
+		// Minimum execution time: 206_501_000 picoseconds.
+		Weight::from_parts(173_102_820, 8799)
+			// Standard Error: 19_713
+			.saturating_add(Weight::from_parts(36_732_883, 0).saturating_mul(v.into()))
+			.saturating_add(T::DbWeight::get().reads(12_u64))
+			.saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into())))
+			.saturating_add(T::DbWeight::get().writes(5_u64))
+			.saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(v.into())))
+			.saturating_add(Weight::from_parts(0, 11187).saturating_mul(v.into()))
+	}
+	/// Storage: Timestamp Now (r:1 w:0)
+	/// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen)
+	/// Storage: ParachainInfo ParachainId (r:1 w:0)
+	/// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
+	/// Storage: AssetRegistry LocationToAssetId (r:1 w:0)
+	/// Proof Skipped: AssetRegistry LocationToAssetId (max_values: None, max_size: None, mode: Measured)
+	/// Storage: System Account (r:2 w:2)
+	/// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
+	/// Storage: AutomationTime AccountTasks (r:1 w:1)
+	/// Proof Skipped: AutomationTime AccountTasks (max_values: None, max_size: None, mode: Measured)
+	/// Storage: AutomationTime ScheduledTasksV3 (r:1 w:1)
+	/// Proof Skipped: AutomationTime ScheduledTasksV3 (max_values: None, max_size: None, mode: Measured)
+	fn schedule_auto_compound_delegated_stake_task_full() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `6543`
+		//  Estimated: `10008`
+		// Minimum execution time: 113_525_000 picoseconds.
+ Weight::from_parts(115_133_000, 10008) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } + /// Storage: Timestamp Now (r:1 w:0) + /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: ParachainInfo ParachainId (r:1 w:0) + /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: AssetRegistry LocationToAssetId (r:1 w:0) + /// Proof Skipped: AssetRegistry LocationToAssetId (max_values: None, max_size: None, mode: Measured) + /// Storage: System Account (r:2 w:2) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: AutomationTime AccountTasks (r:1 w:1) + /// Proof Skipped: AutomationTime AccountTasks (max_values: None, max_size: None, mode: Measured) + /// Storage: AutomationTime ScheduledTasksV3 (r:36 w:36) + /// Proof Skipped: AutomationTime ScheduledTasksV3 (max_values: None, max_size: None, mode: Measured) + /// The range of component `v` is `[1, 36]`. + fn schedule_dynamic_dispatch_task(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `790` + // Estimated: `6196 + v * (2475 ±0)` + // Minimum execution time: 83_137_000 picoseconds. + Weight::from_parts(80_333_154, 6196) + // Standard Error: 3_420 + .saturating_add(Weight::from_parts(3_986_820, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) + .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(v.into()))) + .saturating_add(Weight::from_parts(0, 2475).saturating_mul(v.into())) + } + /// Storage: Timestamp Now (r:1 w:0) + /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: ParachainInfo ParachainId (r:1 w:0) + /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: AssetRegistry LocationToAssetId (r:1 w:0) + /// Proof Skipped: AssetRegistry LocationToAssetId (max_values: None, max_size: None, mode: Measured) + /// Storage: System Account (r:2 w:2) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: AutomationTime AccountTasks (r:1 w:1) + /// Proof Skipped: AutomationTime AccountTasks (max_values: None, max_size: None, mode: Measured) + /// Storage: AutomationTime ScheduledTasksV3 (r:36 w:36) + /// Proof Skipped: AutomationTime ScheduledTasksV3 (max_values: None, max_size: None, mode: Measured) + /// The range of component `v` is `[1, 36]`. + fn schedule_dynamic_dispatch_task_full(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1673 + v * (8712 ±0)` + // Estimated: `6196 + v * (11187 ±0)` + // Minimum execution time: 132_530_000 picoseconds. 
+ Weight::from_parts(96_845_515, 6196) + // Standard Error: 16_049 + .saturating_add(Weight::from_parts(38_443_161, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) + .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(v.into()))) + .saturating_add(Weight::from_parts(0, 11187).saturating_mul(v.into())) + } + /// Storage: AutomationTime AccountTasks (r:1 w:1) + /// Proof Skipped: AutomationTime AccountTasks (max_values: None, max_size: None, mode: Measured) + /// Storage: Timestamp Now (r:1 w:0) + /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: AutomationTime LastTimeSlot (r:1 w:0) + /// Proof Skipped: AutomationTime LastTimeSlot (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: AutomationTime ScheduledTasksV3 (r:36 w:36) + /// Proof Skipped: AutomationTime ScheduledTasksV3 (max_values: None, max_size: None, mode: Measured) + fn cancel_scheduled_task_full() -> Weight { + // Proof Size summary in bytes: + // Measured: `316368` + // Estimated: `406458` + // Minimum execution time: 1_380_349_000 picoseconds. + Weight::from_parts(1_410_325_000, 406458) + .saturating_add(T::DbWeight::get().reads(39_u64)) + .saturating_add(T::DbWeight::get().writes(37_u64)) + } + /// Storage: AutomationTime AccountTasks (r:1 w:1) + /// Proof Skipped: AutomationTime AccountTasks (max_values: None, max_size: None, mode: Measured) + /// Storage: Timestamp Now (r:1 w:0) + /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: AutomationTime LastTimeSlot (r:1 w:0) + /// Proof Skipped: AutomationTime LastTimeSlot (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: AutomationTime ScheduledTasksV3 (r:1 w:1) + /// Proof Skipped: AutomationTime ScheduledTasksV3 (max_values: None, max_size: None, mode: Measured) + fn force_cancel_scheduled_task() -> Weight { + // Proof Size summary in bytes: + // Measured: `462` + // Estimated: `3927` + // Minimum execution time: 28_671_000 picoseconds. + Weight::from_parts(29_022_000, 3927) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: AutomationTime AccountTasks (r:1 w:1) + /// Proof Skipped: AutomationTime AccountTasks (max_values: None, max_size: None, mode: Measured) + /// Storage: Timestamp Now (r:1 w:0) + /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: AutomationTime LastTimeSlot (r:1 w:0) + /// Proof Skipped: AutomationTime LastTimeSlot (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: AutomationTime ScheduledTasksV3 (r:36 w:36) + /// Proof Skipped: AutomationTime ScheduledTasksV3 (max_values: None, max_size: None, mode: Measured) + fn force_cancel_scheduled_task_full() -> Weight { + // Proof Size summary in bytes: + // Measured: `316368` + // Estimated: `406458` + // Minimum execution time: 1_414_746_000 picoseconds. 
+		Weight::from_parts(1_422_630_000, 406458)
+			.saturating_add(T::DbWeight::get().reads(39_u64))
+			.saturating_add(T::DbWeight::get().writes(37_u64))
+	}
+	/// Storage: AutomationTime AccountTasks (r:1 w:1)
+	/// Proof Skipped: AutomationTime AccountTasks (max_values: None, max_size: None, mode: Measured)
+	/// Storage: Timestamp Now (r:1 w:0)
+	/// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen)
+	/// Storage: AutomationTime LastTimeSlot (r:1 w:0)
+	/// Proof Skipped: AutomationTime LastTimeSlot (max_values: Some(1), max_size: None, mode: Measured)
+	/// Storage: AutomationTime ScheduledTasksV3 (r:36 w:36)
+	/// Proof Skipped: AutomationTime ScheduledTasksV3 (max_values: None, max_size: None, mode: Measured)
+	fn cancel_task_with_schedule_as_full() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `3402`
+		//  Estimated: `93492`
+		// Minimum execution time: 168_198_000 picoseconds.
+		Weight::from_parts(170_642_000, 93492)
+			.saturating_add(T::DbWeight::get().reads(39_u64))
+			.saturating_add(T::DbWeight::get().writes(37_u64))
+	}
+	/// Storage: ParachainInfo ParachainId (r:1 w:0)
+	/// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
+	/// Storage: UnknownTokens ConcreteFungibleBalances (r:1 w:0)
+	/// Proof Skipped: UnknownTokens ConcreteFungibleBalances (max_values: None, max_size: None, mode: Measured)
+	/// Storage: AssetRegistry LocationToAssetId (r:1 w:0)
+	/// Proof Skipped: AssetRegistry LocationToAssetId (max_values: None, max_size: None, mode: Measured)
+	fn run_xcmp_task() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `481`
+		//  Estimated: `3946`
+		// Minimum execution time: 38_174_000 picoseconds.
+		Weight::from_parts(38_529_000, 3946)
+			.saturating_add(T::DbWeight::get().reads(3_u64))
+	}
+	/// Storage: ParachainInfo ParachainId (r:1 w:0)
+	/// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
+	/// Storage: System Account (r:1 w:1)
+	/// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
+	/// Storage: ParachainStaking DelegatorState (r:1 w:1)
+	/// Proof Skipped: ParachainStaking DelegatorState (max_values: None, max_size: None, mode: Measured)
+	/// Storage: ParachainStaking DelegationScheduledRequests (r:1 w:0)
+	/// Proof Skipped: ParachainStaking DelegationScheduledRequests (max_values: None, max_size: None, mode: Measured)
+	/// Storage: Balances Locks (r:1 w:1)
+	/// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen)
+	/// Storage: Balances Freezes (r:1 w:0)
+	/// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen)
+	/// Storage: ParachainStaking CandidateInfo (r:1 w:1)
+	/// Proof Skipped: ParachainStaking CandidateInfo (max_values: None, max_size: None, mode: Measured)
+	/// Storage: ParachainStaking TopDelegations (r:1 w:1)
+	/// Proof Skipped: ParachainStaking TopDelegations (max_values: None, max_size: None, mode: Measured)
+	/// Storage: ParachainStaking CandidatePool (r:1 w:1)
+	/// Proof Skipped: ParachainStaking CandidatePool (max_values: Some(1), max_size: None, mode: Measured)
+	/// Storage: ParachainStaking Total (r:1 w:1)
+	/// Proof Skipped: ParachainStaking Total (max_values: Some(1), max_size: None, mode: Measured)
+	fn run_auto_compound_delegated_stake_task() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `1464`
+		//  Estimated: `4929`
+		// Minimum execution time: 79_388_000 picoseconds.
+		Weight::from_parts(80_419_000, 4929)
+			.saturating_add(T::DbWeight::get().reads(10_u64))
+			.saturating_add(T::DbWeight::get().writes(7_u64))
+	}
+	/// Storage: Valve ValveClosed (r:1 w:0)
+	/// Proof Skipped: Valve ValveClosed (max_values: Some(1), max_size: None, mode: Measured)
+	/// Storage: Valve ClosedPallets (r:1 w:0)
+	/// Proof Skipped: Valve ClosedPallets (max_values: None, max_size: None, mode: Measured)
+	fn run_dynamic_dispatch_action() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `133`
+		//  Estimated: `3598`
+		// Minimum execution time: 8_518_000 picoseconds.
+		Weight::from_parts(8_778_000, 3598)
+			.saturating_add(T::DbWeight::get().reads(2_u64))
+	}
+	fn run_dynamic_dispatch_action_fail_decode() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 892_000 picoseconds.
+		Weight::from_parts(965_000, 0)
+	}
+	/// Storage: AutomationTime AccountTasks (r:1 w:1)
+	/// Proof Skipped: AutomationTime AccountTasks (max_values: None, max_size: None, mode: Measured)
+	/// The range of component `v` is `[0, 1]`.
+	fn run_missed_tasks_many_found(v: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0 + v * (278 ±0)`
+		//  Estimated: `1871 + v * (1872 ±0)`
+		// Minimum execution time: 253_000 picoseconds.
+		Weight::from_parts(304_932, 1871)
+			// Standard Error: 9_053
+			.saturating_add(Weight::from_parts(20_508_667, 0).saturating_mul(v.into()))
+			.saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into())))
+			.saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(v.into())))
+			.saturating_add(Weight::from_parts(0, 1872).saturating_mul(v.into()))
+	}
+	/// Storage: AutomationTime AccountTasks (r:1 w:0)
+	/// Proof Skipped: AutomationTime AccountTasks (max_values: None, max_size: None, mode: Measured)
+	/// The range of component `v` is `[0, 1]`.
+	fn run_missed_tasks_many_missing(v: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0 + v * (109 ±0)`
+		//  Estimated: `1787 + v * (1787 ±0)`
+		// Minimum execution time: 252_000 picoseconds.
+		Weight::from_parts(298_306, 1787)
+			// Standard Error: 3_108
+			.saturating_add(Weight::from_parts(9_098_593, 0).saturating_mul(v.into()))
+			.saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into())))
+			.saturating_add(Weight::from_parts(0, 1787).saturating_mul(v.into()))
+	}
+	/// Storage: Timestamp Now (r:1 w:0)
+	/// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen)
+	/// Storage: AutomationTime AccountTasks (r:1 w:1)
+	/// Proof Skipped: AutomationTime AccountTasks (max_values: None, max_size: None, mode: Measured)
+	/// Storage: Valve ValveClosed (r:1 w:0)
+	/// Proof Skipped: Valve ValveClosed (max_values: Some(1), max_size: None, mode: Measured)
+	/// Storage: Valve ClosedPallets (r:1 w:0)
+	/// Proof Skipped: Valve ClosedPallets (max_values: None, max_size: None, mode: Measured)
+	/// The range of component `v` is `[0, 1]`.
+	fn run_tasks_many_found(v: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `128 + v * (410 ±0)`
+		//  Estimated: `2001 + v * (2002 ±0)`
+		// Minimum execution time: 3_384_000 picoseconds.
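+		// Reading these generated figures (explanatory note, not generated
+		// output): `Weight::from_parts(ref_time, proof_size)` carries the base
+		// execution time in picoseconds and the PoV proof size in bytes; the
+		// `saturating_add` chains then layer on the per-`v` slope and the DB
+		// read/write costs.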
+ Weight::from_parts(3_570_563, 2001) + // Standard Error: 12_681 + .saturating_add(Weight::from_parts(41_205_036, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(v.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(v.into()))) + .saturating_add(Weight::from_parts(0, 2002).saturating_mul(v.into())) + } + /// Storage: Timestamp Now (r:1 w:0) + /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// The range of component `v` is `[0, 1]`. + fn run_tasks_many_missing(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `1493` + // Minimum execution time: 2_631_000 picoseconds. + Weight::from_parts(2_821_983, 1493) + // Standard Error: 9_654 + .saturating_add(Weight::from_parts(141_716, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// Storage: Timestamp Now (r:1 w:0) + /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + fn update_task_queue_overhead() -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `1493` + // Minimum execution time: 2_626_000 picoseconds. + Weight::from_parts(2_747_000, 1493) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// Storage: AutomationTime ScheduledTasksV3 (r:11 w:1) + /// Proof Skipped: AutomationTime ScheduledTasksV3 (max_values: None, max_size: None, mode: Measured) + /// Storage: AutomationTime MissedQueueV2 (r:1 w:1) + /// Proof Skipped: AutomationTime MissedQueueV2 (max_values: Some(1), max_size: None, mode: Measured) + /// The range of component `v` is `[0, 2]`. + fn append_to_missed_tasks(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `109 + v * (107 ±0)` + // Estimated: `13581 + v * (1065 ±0)` + // Minimum execution time: 3_285_000 picoseconds. + Weight::from_parts(3_350_768, 13581) + // Standard Error: 37_885 + .saturating_add(Weight::from_parts(9_206_325, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) + .saturating_add(T::DbWeight::get().writes(1_u64)) + .saturating_add(Weight::from_parts(0, 1065).saturating_mul(v.into())) + } + /// Storage: AutomationTime TaskQueueV2 (r:1 w:1) + /// Proof Skipped: AutomationTime TaskQueueV2 (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: AutomationTime MissedQueueV2 (r:1 w:1) + /// Proof Skipped: AutomationTime MissedQueueV2 (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: AutomationTime ScheduledTasksV3 (r:1 w:1) + /// Proof Skipped: AutomationTime ScheduledTasksV3 (max_values: None, max_size: None, mode: Measured) + fn update_scheduled_task_queue() -> Weight { + // Proof Size summary in bytes: + // Measured: `8910` + // Estimated: `12375` + // Minimum execution time: 45_128_000 picoseconds. + Weight::from_parts(45_812_000, 12375) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } + /// Storage: AutomationTime ScheduledTasksV3 (r:1 w:0) + /// Proof Skipped: AutomationTime ScheduledTasksV3 (max_values: None, max_size: None, mode: Measured) + fn shift_missed_tasks() -> Weight { + // Proof Size summary in bytes: + // Measured: `216` + // Estimated: `3681` + // Minimum execution time: 6_002_000 picoseconds. 
+ Weight::from_parts(6_175_000, 3681) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + /// Storage: ParachainInfo ParachainId (r:1 w:0) + /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Timestamp Now (r:1 w:0) + /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: AssetRegistry LocationToAssetId (r:2 w:0) + /// Proof Skipped: AssetRegistry LocationToAssetId (max_values: None, max_size: None, mode: Measured) + /// Storage: AssetRegistry Metadata (r:1 w:0) + /// Proof Skipped: AssetRegistry Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: Tokens Accounts (r:2 w:2) + /// Proof: Tokens Accounts (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) + /// Storage: System Account (r:3 w:1) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: AutomationTime AccountTasks (r:1 w:1) + /// Proof Skipped: AutomationTime AccountTasks (max_values: None, max_size: None, mode: Measured) + /// Storage: AutomationTime ScheduledTasksV3 (r:36 w:36) + /// Proof Skipped: AutomationTime ScheduledTasksV3 (max_values: None, max_size: None, mode: Measured) + /// Storage: Tokens TotalIssuance (r:1 w:1) + /// Proof: Tokens TotalIssuance (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) + /// The range of component `v` is `[1, 36]`. + fn schedule_xcmp_task_full(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `2072 + v * (8712 ±0)` + // Estimated: `8799 + v * (11187 ±0)` + // Minimum execution time: 206_501_000 picoseconds. + Weight::from_parts(173_102_820, 8799) + // Standard Error: 19_713 + .saturating_add(Weight::from_parts(36_732_883, 0).saturating_mul(v.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) + .saturating_add(RocksDbWeight::get().writes(5_u64)) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(v.into()))) + .saturating_add(Weight::from_parts(0, 11187).saturating_mul(v.into())) + } + /// Storage: Timestamp Now (r:1 w:0) + /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: ParachainInfo ParachainId (r:1 w:0) + /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: AssetRegistry LocationToAssetId (r:1 w:0) + /// Proof Skipped: AssetRegistry LocationToAssetId (max_values: None, max_size: None, mode: Measured) + /// Storage: System Account (r:2 w:2) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: AutomationTime AccountTasks (r:1 w:1) + /// Proof Skipped: AutomationTime AccountTasks (max_values: None, max_size: None, mode: Measured) + /// Storage: AutomationTime ScheduledTasksV3 (r:1 w:1) + /// Proof Skipped: AutomationTime ScheduledTasksV3 (max_values: None, max_size: None, mode: Measured) + fn schedule_auto_compound_delegated_stake_task_full() -> Weight { + // Proof Size summary in bytes: + // Measured: `6543` + // Estimated: `10008` + // Minimum execution time: 113_525_000 picoseconds. 
+ Weight::from_parts(115_133_000, 10008) + .saturating_add(RocksDbWeight::get().reads(7_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } + /// Storage: Timestamp Now (r:1 w:0) + /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: ParachainInfo ParachainId (r:1 w:0) + /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: AssetRegistry LocationToAssetId (r:1 w:0) + /// Proof Skipped: AssetRegistry LocationToAssetId (max_values: None, max_size: None, mode: Measured) + /// Storage: System Account (r:2 w:2) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: AutomationTime AccountTasks (r:1 w:1) + /// Proof Skipped: AutomationTime AccountTasks (max_values: None, max_size: None, mode: Measured) + /// Storage: AutomationTime ScheduledTasksV3 (r:36 w:36) + /// Proof Skipped: AutomationTime ScheduledTasksV3 (max_values: None, max_size: None, mode: Measured) + /// The range of component `v` is `[1, 36]`. + fn schedule_dynamic_dispatch_task(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `790` + // Estimated: `6196 + v * (2475 ±0)` + // Minimum execution time: 83_137_000 picoseconds. + Weight::from_parts(80_333_154, 6196) + // Standard Error: 3_420 + .saturating_add(Weight::from_parts(3_986_820, 0).saturating_mul(v.into())) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(v.into()))) + .saturating_add(Weight::from_parts(0, 2475).saturating_mul(v.into())) + } + /// Storage: Timestamp Now (r:1 w:0) + /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: ParachainInfo ParachainId (r:1 w:0) + /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: AssetRegistry LocationToAssetId (r:1 w:0) + /// Proof Skipped: AssetRegistry LocationToAssetId (max_values: None, max_size: None, mode: Measured) + /// Storage: System Account (r:2 w:2) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: AutomationTime AccountTasks (r:1 w:1) + /// Proof Skipped: AutomationTime AccountTasks (max_values: None, max_size: None, mode: Measured) + /// Storage: AutomationTime ScheduledTasksV3 (r:36 w:36) + /// Proof Skipped: AutomationTime ScheduledTasksV3 (max_values: None, max_size: None, mode: Measured) + /// The range of component `v` is `[1, 36]`. + fn schedule_dynamic_dispatch_task_full(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1673 + v * (8712 ±0)` + // Estimated: `6196 + v * (11187 ±0)` + // Minimum execution time: 132_530_000 picoseconds. 
+ Weight::from_parts(96_845_515, 6196) + // Standard Error: 16_049 + .saturating_add(Weight::from_parts(38_443_161, 0).saturating_mul(v.into())) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(v.into()))) + .saturating_add(Weight::from_parts(0, 11187).saturating_mul(v.into())) + } + /// Storage: AutomationTime AccountTasks (r:1 w:1) + /// Proof Skipped: AutomationTime AccountTasks (max_values: None, max_size: None, mode: Measured) + /// Storage: Timestamp Now (r:1 w:0) + /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: AutomationTime LastTimeSlot (r:1 w:0) + /// Proof Skipped: AutomationTime LastTimeSlot (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: AutomationTime ScheduledTasksV3 (r:36 w:36) + /// Proof Skipped: AutomationTime ScheduledTasksV3 (max_values: None, max_size: None, mode: Measured) + fn cancel_scheduled_task_full() -> Weight { + // Proof Size summary in bytes: + // Measured: `316368` + // Estimated: `406458` + // Minimum execution time: 1_380_349_000 picoseconds. + Weight::from_parts(1_410_325_000, 406458) + .saturating_add(RocksDbWeight::get().reads(39_u64)) + .saturating_add(RocksDbWeight::get().writes(37_u64)) + } + /// Storage: AutomationTime AccountTasks (r:1 w:1) + /// Proof Skipped: AutomationTime AccountTasks (max_values: None, max_size: None, mode: Measured) + /// Storage: Timestamp Now (r:1 w:0) + /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: AutomationTime LastTimeSlot (r:1 w:0) + /// Proof Skipped: AutomationTime LastTimeSlot (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: AutomationTime ScheduledTasksV3 (r:1 w:1) + /// Proof Skipped: AutomationTime ScheduledTasksV3 (max_values: None, max_size: None, mode: Measured) + fn force_cancel_scheduled_task() -> Weight { + // Proof Size summary in bytes: + // Measured: `462` + // Estimated: `3927` + // Minimum execution time: 28_671_000 picoseconds. + Weight::from_parts(29_022_000, 3927) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: AutomationTime AccountTasks (r:1 w:1) + /// Proof Skipped: AutomationTime AccountTasks (max_values: None, max_size: None, mode: Measured) + /// Storage: Timestamp Now (r:1 w:0) + /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: AutomationTime LastTimeSlot (r:1 w:0) + /// Proof Skipped: AutomationTime LastTimeSlot (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: AutomationTime ScheduledTasksV3 (r:36 w:36) + /// Proof Skipped: AutomationTime ScheduledTasksV3 (max_values: None, max_size: None, mode: Measured) + fn force_cancel_scheduled_task_full() -> Weight { + // Proof Size summary in bytes: + // Measured: `316368` + // Estimated: `406458` + // Minimum execution time: 1_414_746_000 picoseconds. 
+ Weight::from_parts(1_422_630_000, 406458) + .saturating_add(RocksDbWeight::get().reads(39_u64)) + .saturating_add(RocksDbWeight::get().writes(37_u64)) + } + /// Storage: AutomationTime AccountTasks (r:1 w:1) + /// Proof Skipped: AutomationTime AccountTasks (max_values: None, max_size: None, mode: Measured) + /// Storage: Timestamp Now (r:1 w:0) + /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: AutomationTime LastTimeSlot (r:1 w:0) + /// Proof Skipped: AutomationTime LastTimeSlot (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: AutomationTime ScheduledTasksV3 (r:36 w:36) + /// Proof Skipped: AutomationTime ScheduledTasksV3 (max_values: None, max_size: None, mode: Measured) + fn cancel_task_with_schedule_as_full() -> Weight { + // Proof Size summary in bytes: + // Measured: `3402` + // Estimated: `93492` + // Minimum execution time: 168_198_000 picoseconds. + Weight::from_parts(170_642_000, 93492) + .saturating_add(RocksDbWeight::get().reads(39_u64)) + .saturating_add(RocksDbWeight::get().writes(37_u64)) + } + /// Storage: ParachainInfo ParachainId (r:1 w:0) + /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: UnknownTokens ConcreteFungibleBalances (r:1 w:0) + /// Proof Skipped: UnknownTokens ConcreteFungibleBalances (max_values: None, max_size: None, mode: Measured) + /// Storage: AssetRegistry LocationToAssetId (r:1 w:0) + /// Proof Skipped: AssetRegistry LocationToAssetId (max_values: None, max_size: None, mode: Measured) + fn run_xcmp_task() -> Weight { + // Proof Size summary in bytes: + // Measured: `481` + // Estimated: `3946` + // Minimum execution time: 38_174_000 picoseconds. + Weight::from_parts(38_529_000, 3946) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + } + /// Storage: ParachainInfo ParachainId (r:1 w:0) + /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: System Account (r:1 w:1) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: ParachainStaking DelegatorState (r:1 w:1) + /// Proof Skipped: ParachainStaking DelegatorState (max_values: None, max_size: None, mode: Measured) + /// Storage: ParachainStaking DelegationScheduledRequests (r:1 w:0) + /// Proof Skipped: ParachainStaking DelegationScheduledRequests (max_values: None, max_size: None, mode: Measured) + /// Storage: Balances Locks (r:1 w:1) + /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: ParachainStaking CandidateInfo (r:1 w:1) + /// Proof Skipped: ParachainStaking CandidateInfo (max_values: None, max_size: None, mode: Measured) + /// Storage: ParachainStaking TopDelegations (r:1 w:1) + /// Proof Skipped: ParachainStaking TopDelegations (max_values: None, max_size: None, mode: Measured) + /// Storage: ParachainStaking CandidatePool (r:1 w:1) + /// Proof Skipped: ParachainStaking CandidatePool (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ParachainStaking Total (r:1 w:1) + /// Proof Skipped: ParachainStaking Total (max_values: Some(1), max_size: None, mode: Measured) + fn run_auto_compound_delegated_stake_task() -> Weight { + // Proof Size summary in bytes: + // Measured: `1464` 
+ // Estimated: `4929` + // Minimum execution time: 79_388_000 picoseconds. + Weight::from_parts(80_419_000, 4929) + .saturating_add(RocksDbWeight::get().reads(10_u64)) + .saturating_add(RocksDbWeight::get().writes(7_u64)) + } + /// Storage: Valve ValveClosed (r:1 w:0) + /// Proof Skipped: Valve ValveClosed (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: Valve ClosedPallets (r:1 w:0) + /// Proof Skipped: Valve ClosedPallets (max_values: None, max_size: None, mode: Measured) + fn run_dynamic_dispatch_action() -> Weight { + // Proof Size summary in bytes: + // Measured: `133` + // Estimated: `3598` + // Minimum execution time: 8_518_000 picoseconds. + Weight::from_parts(8_778_000, 3598) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + } + fn run_dynamic_dispatch_action_fail_decode() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 892_000 picoseconds. + Weight::from_parts(965_000, 0) + } + /// Storage: AutomationTime AccountTasks (r:1 w:1) + /// Proof Skipped: AutomationTime AccountTasks (max_values: None, max_size: None, mode: Measured) + /// The range of component `v` is `[0, 1]`. + fn run_missed_tasks_many_found(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + v * (278 ±0)` + // Estimated: `1871 + v * (1872 ±0)` + // Minimum execution time: 253_000 picoseconds. + Weight::from_parts(304_932, 1871) + // Standard Error: 9_053 + .saturating_add(Weight::from_parts(20_508_667, 0).saturating_mul(v.into())) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(v.into()))) + .saturating_add(Weight::from_parts(0, 1872).saturating_mul(v.into())) + } + /// Storage: AutomationTime AccountTasks (r:1 w:0) + /// Proof Skipped: AutomationTime AccountTasks (max_values: None, max_size: None, mode: Measured) + /// The range of component `v` is `[0, 1]`. + fn run_missed_tasks_many_missing(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + v * (109 ±0)` + // Estimated: `1787 + v * (1787 ±0)` + // Minimum execution time: 252_000 picoseconds. + Weight::from_parts(298_306, 1787) + // Standard Error: 3_108 + .saturating_add(Weight::from_parts(9_098_593, 0).saturating_mul(v.into())) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) + .saturating_add(Weight::from_parts(0, 1787).saturating_mul(v.into())) + } + /// Storage: Timestamp Now (r:1 w:0) + /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: AutomationTime AccountTasks (r:1 w:1) + /// Proof Skipped: AutomationTime AccountTasks (max_values: None, max_size: None, mode: Measured) + /// Storage: Valve ValveClosed (r:1 w:0) + /// Proof Skipped: Valve ValveClosed (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: Valve ClosedPallets (r:1 w:0) + /// Proof Skipped: Valve ClosedPallets (max_values: None, max_size: None, mode: Measured) + /// The range of component `v` is `[0, 1]`. + fn run_tasks_many_found(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `128 + v * (410 ±0)` + // Estimated: `2001 + v * (2002 ±0)` + // Minimum execution time: 3_384_000 picoseconds. 
+ Weight::from_parts(3_570_563, 2001) + // Standard Error: 12_681 + .saturating_add(Weight::from_parts(41_205_036, 0).saturating_mul(v.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(v.into()))) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(v.into()))) + .saturating_add(Weight::from_parts(0, 2002).saturating_mul(v.into())) + } + /// Storage: Timestamp Now (r:1 w:0) + /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// The range of component `v` is `[0, 1]`. + fn run_tasks_many_missing(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `1493` + // Minimum execution time: 2_631_000 picoseconds. + Weight::from_parts(2_821_983, 1493) + // Standard Error: 9_654 + .saturating_add(Weight::from_parts(141_716, 0).saturating_mul(v.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } + /// Storage: Timestamp Now (r:1 w:0) + /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + fn update_task_queue_overhead() -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `1493` + // Minimum execution time: 2_626_000 picoseconds. + Weight::from_parts(2_747_000, 1493) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } + /// Storage: AutomationTime ScheduledTasksV3 (r:11 w:1) + /// Proof Skipped: AutomationTime ScheduledTasksV3 (max_values: None, max_size: None, mode: Measured) + /// Storage: AutomationTime MissedQueueV2 (r:1 w:1) + /// Proof Skipped: AutomationTime MissedQueueV2 (max_values: Some(1), max_size: None, mode: Measured) + /// The range of component `v` is `[0, 2]`. + fn append_to_missed_tasks(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `109 + v * (107 ±0)` + // Estimated: `13581 + v * (1065 ±0)` + // Minimum execution time: 3_285_000 picoseconds. + Weight::from_parts(3_350_768, 13581) + // Standard Error: 37_885 + .saturating_add(Weight::from_parts(9_206_325, 0).saturating_mul(v.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into()))) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + .saturating_add(Weight::from_parts(0, 1065).saturating_mul(v.into())) + } + /// Storage: AutomationTime TaskQueueV2 (r:1 w:1) + /// Proof Skipped: AutomationTime TaskQueueV2 (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: AutomationTime MissedQueueV2 (r:1 w:1) + /// Proof Skipped: AutomationTime MissedQueueV2 (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: AutomationTime ScheduledTasksV3 (r:1 w:1) + /// Proof Skipped: AutomationTime ScheduledTasksV3 (max_values: None, max_size: None, mode: Measured) + fn update_scheduled_task_queue() -> Weight { + // Proof Size summary in bytes: + // Measured: `8910` + // Estimated: `12375` + // Minimum execution time: 45_128_000 picoseconds. + Weight::from_parts(45_812_000, 12375) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + /// Storage: AutomationTime ScheduledTasksV3 (r:1 w:0) + /// Proof Skipped: AutomationTime ScheduledTasksV3 (max_values: None, max_size: None, mode: Measured) + fn shift_missed_tasks() -> Weight { + // Proof Size summary in bytes: + // Measured: `216` + // Estimated: `3681` + // Minimum execution time: 6_002_000 picoseconds. 
+		Weight::from_parts(6_175_000, 3681)
+			.saturating_add(RocksDbWeight::get().reads(1_u64))
+	}
+}
diff --git a/pallets/xcmp-handler/Cargo.toml b/pallets/xcmp-handler/Cargo.toml
new file mode 100644
index 000000000..87ecf7d45
--- /dev/null
+++ b/pallets/xcmp-handler/Cargo.toml
@@ -0,0 +1,112 @@
+[package]
+name = "pallet-xcmp-handler"
+authors = [ "Ava Protocol Team" ]
+description = "Pallet to handle XCMP intricacies."
+edition = "2021"
+homepage = "https://avaprotocol.org"
+repository = "https://github.com/AvaProtocol/tanssi-integration"
+version = "0.1.0"
+
+[package.metadata.docs.rs]
+targets = [ "x86_64-unknown-linux-gnu" ]
+
+[dependencies]
+log = { workspace = true }
+parity-scale-codec = { workspace = true, features = [ "derive" ] }
+scale-info = { workspace = true, features = [ "derive" ] }
+serde = { workspace = true }
+
+# Substrate Dependencies
+## Substrate Primitive Dependencies
+sp-io = { workspace = true }
+sp-runtime = { workspace = true }
+sp-std = { workspace = true }
+
+## Substrate FRAME Dependencies
+frame-benchmarking = { workspace = true, optional = true }
+frame-support = { workspace = true }
+frame-system = { workspace = true }
+
+# Cumulus dependencies
+cumulus-primitives-core = { workspace = true }
+
+# Polkadot Dependencies
+polkadot-parachain-primitives = { workspace = true }
+staging-xcm = { workspace = true }
+staging-xcm-executor = { workspace = true }
+
+# ORML
+orml-currencies = { workspace = true }
+orml-tokens = { workspace = true }
+orml-traits = { workspace = true }
+
+[dev-dependencies]
+# Substrate
+pallet-balances = { workspace = true }
+sp-core = { workspace = true }
+
+# Cumulus dependencies
+cumulus-pallet-xcm = { workspace = true }
+parachain-info = { workspace = true }
+
+# Polkadot Dependencies
+pallet-xcm = { workspace = true }
+polkadot-parachain-primitives = { workspace = true }
+staging-xcm-builder = { workspace = true }
+staging-xcm-executor = { workspace = true }
+
+ava-protocol-primitives = { workspace = true }
+
+[features]
+default = [ "std" ]
+std = [
+	"ava-protocol-primitives/std",
+	"cumulus-pallet-xcm/std",
+	"cumulus-primitives-core/std",
+	"frame-benchmarking/std",
+	"frame-support/std",
+	"frame-system/std",
+	"log/std",
+	"orml-currencies/std",
+	"orml-tokens/std",
+	"orml-traits/std",
+	"pallet-balances/std",
+	"pallet-xcm/std",
+	"parachain-info/std",
+	"parity-scale-codec/std",
+	"polkadot-parachain-primitives/std",
+	"scale-info/std",
+	"serde/std",
+	"sp-core/std",
+	"sp-io/std",
+	"sp-runtime/std",
+	"sp-std/std",
+	"staging-xcm-builder/std",
+	"staging-xcm-executor/std",
+	"staging-xcm/std",
+]
+runtime-benchmarks = [
+	"cumulus-primitives-core/runtime-benchmarks",
+	"frame-benchmarking/runtime-benchmarks",
+	"frame-support/runtime-benchmarks",
+	"frame-system/runtime-benchmarks",
+	"orml-tokens/runtime-benchmarks",
+	"pallet-balances/runtime-benchmarks",
+	"pallet-xcm/runtime-benchmarks",
+	"polkadot-parachain-primitives/runtime-benchmarks",
+	"sp-runtime/runtime-benchmarks",
+	"staging-xcm-builder/runtime-benchmarks",
+	"staging-xcm-executor/runtime-benchmarks",
+]
+try-runtime = [
+	"cumulus-pallet-xcm/try-runtime",
+	"frame-support/try-runtime",
+	"frame-system/try-runtime",
+	"orml-currencies/try-runtime",
+	"orml-tokens/try-runtime",
+	"pallet-balances/try-runtime",
+	"pallet-xcm/try-runtime",
+	"parachain-info/try-runtime",
+	"sp-runtime/try-runtime",
+]
diff --git a/pallets/xcmp-handler/src/lib.rs b/pallets/xcmp-handler/src/lib.rs
new file mode 100644
index 000000000..43d351ad2
--- /dev/null
+++ b/pallets/xcmp-handler/src/lib.rs
@@ -0,0 +1,592 @@
+// This file is part of Ava Protocol.
+
+// Copyright (C) 2022 Ava Protocol
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! # XCMP Handler pallet
+//!
+//! This pallet is used to send XCM Transact messages to other chains.
+//! In order to do that it needs to keep track of which tokens other chains accept,
+//! and the relevant rates.
+//!
+//! At the moment we only support using our own native currency. We are looking into
+//! supporting other chains' native currencies and, eventually, arbitrary currencies.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+pub use pallet::*;
+
+#[cfg(feature = "std")]
+use serde::{Deserialize, Serialize};
+
+#[cfg(test)]
+mod mock;
+
+#[cfg(test)]
+mod tests;
+
+// pub mod migrations;
+
+use cumulus_primitives_core::ParaId;
+use frame_support::pallet_prelude::*;
+use staging_xcm::{latest::InteriorLocation, prelude::*};
+
+// staging_xcm_executor::traits::ConvertLocation,
+
+#[frame_support::pallet]
+pub mod pallet {
+    use super::*;
+    use orml_traits::{location::Reserve, MultiCurrency};
+    use polkadot_parachain_primitives::primitives::Sibling;
+    use sp_runtime::{
+        traits::{AccountIdConversion, CheckedSub, Convert, SaturatedConversion},
+        TokenError::BelowMinimum,
+    };
+    use sp_std::prelude::*;
+    use staging_xcm_executor::traits::WeightBounds;
+
+    pub type MultiCurrencyId<T> = <<T as Config>::MultiCurrency as MultiCurrency<
+        <T as frame_system::Config>::AccountId,
+    >>::CurrencyId;
+
+    #[pallet::config]
+    pub trait Config: frame_system::Config {
+        type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
+
+        type RuntimeCall: From<Call<Self>> + Encode;
+
+        /// The MultiCurrency type for interacting with balances.
+        type MultiCurrency: MultiCurrency<Self::AccountId>;
+
+        /// The currencyIds that our chain supports.
+        type CurrencyId: Parameter
+            + Member
+            + Copy
+            + MaybeSerializeDeserialize
+            + Ord
+            + TypeInfo
+            + MaxEncodedLen
+            + From<MultiCurrencyId<Self>>
+            + Into<MultiCurrencyId<Self>>;
+
+        /// The currencyId for the native currency.
+        #[pallet::constant]
+        type GetNativeCurrencyId: Get<Self::CurrencyId>;
+
+        /// The paraId of this chain.
+        type SelfParaId: Get<ParaId>;
+
+        /// Convert an accountId to a Location.
+        type AccountIdToLocation: Convert<Self::AccountId, Location>;
+
+        /// Convert a CurrencyId to a Location.
+        type CurrencyIdToLocation: Convert<Self::CurrencyId, Option<Location>>;
+
+        /// This chain's Universal Location.
+        type UniversalLocation: Get<InteriorLocation>;
+
+        /// Utility for sending XCM messages.
+        type XcmSender: SendXcm;
+
+        /// Utility for executing XCM instructions.
+        type XcmExecutor: ExecuteXcm<<Self as Config>::RuntimeCall>;
+
+        /// Utility for determining XCM instruction weights.
+        type Weigher: WeightBounds<<Self as Config>::RuntimeCall>;
+
+        /// The way to retrieve the reserve of an Asset. This can be
+        /// configured to accept absolute or relative paths for self tokens.
+        type ReserveProvider: Reserve;
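+        // Illustrative note (a behavioural assumption spelled out from the doc
+        // comment above, not new logic): for the native token, an absolute-path
+        // provider would report the reserve of an asset at
+        // `(1, Parachain(SelfParaId))` as this chain, while a relative-path
+        // provider would do the same for `Location::here()`. The branch on
+        // `reserve == Location::here()` in `get_local_currency_instructions`
+        // below relies on this.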
+        /// Self chain location.
+        #[pallet::constant]
+        type SelfLocation: Get<Location>;
+    }
+
+    const STORAGE_VERSION: StorageVersion = StorageVersion::new(0);
+
+    #[pallet::pallet]
+    #[pallet::without_storage_info]
+    #[pallet::storage_version(STORAGE_VERSION)]
+    pub struct Pallet<T>(_);
+
+    #[pallet::event]
+    #[pallet::generate_deposit(pub(crate) fn deposit_event)]
+    pub enum Event<T: Config> {
+        /// XCM sent to target chain.
+        XcmSent {
+            destination: Location,
+        },
+        /// XCM transacted in local chain.
+        XcmTransactedLocally,
+        /// XCM fees successfully paid.
+        XcmFeesPaid {
+            source: T::AccountId,
+            dest: T::AccountId,
+        },
+        /// XCM fees failed to transfer.
+        XcmFeesFailed {
+            source: T::AccountId,
+            dest: T::AccountId,
+            error: DispatchError,
+        },
+        TransactInfoChanged {
+            destination: Location,
+        },
+        TransactInfoRemoved {
+            destination: Location,
+        },
+    }
+
+    #[pallet::error]
+    pub enum Error<T> {
+        /// Either the weight or the fee per second is too large.
+        FeeOverflow,
+        /// Either the instruction weight or the transact weight is too large.
+        WeightOverflow,
+        /// Failed to create the Location for the descend origin.
+        FailedLocationToJunction,
+        /// Unable to reanchor the asset.
+        CannotReanchor,
+        /// Failed to send XCM to target.
+        ErrorSendingXcmToTarget,
+        /// Failed to execute XCM in local chain.
+        XcmExecutionFailed,
+        /// Failed to get weight of call.
+        ErrorGettingCallWeight,
+        /// The version of the `VersionedLocation` value used is not able
+        /// to be interpreted.
+        BadVersion,
+        /// Asset not found.
+        TransactInfoNotFound,
+        /// Invalid asset location.
+        InvalidAssetLocation,
+        /// The fee payment asset location is not supported.
+        UnsupportedFeePayment,
+    }
+
+    #[pallet::call]
+    impl<T: Config> Pallet<T> {}
+
+    impl<T: Config> Pallet<T> {
+        /// Get the instructions for a transact xcm.
+        /// Currently we only support instructions if the currency is the local chain's.
+        ///
+        /// Returns two instruction sets.
+        /// The first is to execute locally.
+        /// The second is to execute on the target chain.
+        pub fn get_instruction_set(
+            destination: Location,
+            asset_location: Location,
+            fee: u128,
+            caller: T::AccountId,
+            transact_encoded_call: Vec<u8>,
+            transact_encoded_call_weight: Weight,
+            overall_weight: Weight,
+            flow: InstructionSequence,
+        ) -> Result<
+            (
+                staging_xcm::latest::Xcm<<T as Config>::RuntimeCall>,
+                staging_xcm::latest::Xcm<()>,
+            ),
+            DispatchError,
+        > {
+            let descend_location: Junctions = T::AccountIdToLocation::convert(caller)
+                .try_into()
+                .map_err(|_| Error::<T>::FailedLocationToJunction)?;
+
+            let instructions = match flow {
+                InstructionSequence::PayThroughSovereignAccount => {
+                    Self::get_local_currency_instructions(
+                        destination,
+                        asset_location,
+                        descend_location,
+                        transact_encoded_call,
+                        transact_encoded_call_weight,
+                        overall_weight,
+                        fee,
+                    )?
+                }
+                InstructionSequence::PayThroughRemoteDerivativeAccount => {
+                    Self::get_alternate_flow_instructions(
+                        destination,
+                        asset_location,
+                        descend_location,
+                        transact_encoded_call,
+                        transact_encoded_call_weight,
+                        overall_weight,
+                        fee,
+                    )?
+                }
+            };
+
+            Ok(instructions)
+        }
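+        // Usage sketch (illustrative; `para_2001`, `native_loc`, `encoded` and
+        // the weights are placeholders, not values from this change): a caller
+        // that wants the fee paid through the sovereign account would do roughly
+        //
+        //     let (local, target) = Self::get_instruction_set(
+        //         para_2001.clone(),          // destination
+        //         native_loc,                 // asset used for fees
+        //         1_000_000_000,              // fee
+        //         caller,
+        //         encoded,                    // SCALE-encoded remote call
+        //         encoded_call_weight,
+        //         overall_weight,
+        //         InstructionSequence::PayThroughSovereignAccount,
+        //     )?;
+        //
+        // after which `transact_xcm` (below) executes `local` here and sends
+        // `target` to the destination chain.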
+ /// + /// Local instructions + /// - WithdrawAsset + /// - DepositAsset + /// + /// Target instructions + /// - ReserveAssetDeposited + /// - BuyExecution + /// - DescendOrigin + /// - Transact + /// - RefundSurplus + /// - DepositAsset + pub fn get_local_currency_instructions( + destination: Location, + asset_location: Location, + descend_location: Junctions, + transact_encoded_call: Vec<u8>, + transact_encoded_call_weight: Weight, + overall_weight: Weight, + fee: u128, + ) -> Result< + ( + staging_xcm::latest::Xcm<<T as Config>::RuntimeCall>, + staging_xcm::latest::Xcm<()>, + ), + DispatchError, + > { + let local_asset = Asset { + id: asset_location.into(), + fun: Fungibility::Fungible(fee), + }; + + let target_asset = local_asset + .clone() + .reanchored(&destination.clone(), &T::UniversalLocation::get()) + .map_err(|_| Error::<T>::CannotReanchor)?; + + let reserve = T::ReserveProvider::reserve(&local_asset) + .ok_or(Error::<T>::InvalidAssetLocation)?; + + let (local_xcm, target_xcm) = if reserve == Location::here() { + let local_xcm = Xcm(vec![ + WithdrawAsset::<<T as Config>::RuntimeCall>(local_asset.into()), + DepositAsset::<<T as Config>::RuntimeCall> { + assets: Wild(All), + beneficiary: destination.clone(), + }, + ]); + let target_xcm = Xcm(vec![ + ReserveAssetDeposited::<()>(target_asset.clone().into()), + BuyExecution::<()> { + fees: target_asset, + weight_limit: Limited(overall_weight), + }, + DescendOrigin::<()>(descend_location), + Transact::<()> { + origin_kind: OriginKind::SovereignAccount, + require_weight_at_most: transact_encoded_call_weight, + call: transact_encoded_call.into(), + }, + RefundSurplus::<()>, + DepositAsset::<()> { + assets: Wild(AllCounted(1)), + beneficiary: T::SelfLocation::get(), + }, + ]); + (local_xcm, target_xcm) + } else if reserve == destination { + let local_xcm = Xcm(vec![ + WithdrawAsset::<<T as Config>::RuntimeCall>(local_asset.clone().into()), + BurnAsset::<<T as Config>::RuntimeCall>(local_asset.into()), + ]); + let target_xcm = Xcm(vec![ + WithdrawAsset::<()>(target_asset.clone().into()), + BuyExecution::<()> { + fees: target_asset, + weight_limit: Limited(overall_weight), + }, + DescendOrigin::<()>(descend_location), + Transact::<()> { + origin_kind: OriginKind::SovereignAccount, + require_weight_at_most: transact_encoded_call_weight, + call: transact_encoded_call.into(), + }, + RefundSurplus::<()>, + DepositAsset::<()> { + assets: Wild(AllCounted(1)), + beneficiary: T::SelfLocation::get(), + }, + ]); + (local_xcm, target_xcm) + } else { + return Err(Error::<T>::UnsupportedFeePayment.into()); + }; + + Ok((local_xcm, target_xcm)) + } + + /// Construct the alternate XCM flow instructions. + /// + /// There are no local instructions, since the user's account is already funded on the target chain. + /// + /// Target instructions + /// - DescendOrigin + /// - WithdrawAsset + /// - BuyExecution + /// - Transact + fn get_alternate_flow_instructions( + destination: Location, + asset_location: Location, + descend_location: Junctions, + transact_encoded_call: Vec<u8>, + transact_encoded_call_weight: Weight, + xcm_weight: Weight, + fee: u128, + ) -> Result< + ( + staging_xcm::latest::Xcm<<T as Config>::RuntimeCall>, + staging_xcm::latest::Xcm<()>, + ), + DispatchError, + > { + // XCM for the target chain + let target_asset = Asset { + id: asset_location.into(), + fun: Fungibility::Fungible(fee), + } + .reanchored(&destination, &T::UniversalLocation::get()) + .map_err(|_| Error::<T>::CannotReanchor)?; + + let target_xcm = Xcm(vec![ + DescendOrigin::<()>(descend_location), + 
WithdrawAsset::<()>(target_asset.clone().into()), + BuyExecution::<()> { + fees: target_asset, + weight_limit: Limited(xcm_weight), + }, + Transact::<()> { + origin_kind: OriginKind::SovereignAccount, + require_weight_at_most: transact_encoded_call_weight, + call: transact_encoded_call.into(), + }, + ]); + + Ok((Xcm(vec![]), target_xcm)) + } + + /// Execute Transact XCM instructions on the local chain. + /// + pub fn transact_in_local_chain( + internal_instructions: staging_xcm::latest::Xcm<<T as Config>::RuntimeCall>, + ) -> Result<(), DispatchError> { + let local_sovereign_account = T::SelfLocation::get(); + let weight = T::Weigher::weight(&mut internal_instructions.clone().into()) + .map_err(|_| Error::<T>::ErrorGettingCallWeight)?; + let mut hash = internal_instructions.using_encoded(sp_io::hashing::blake2_256); + + // Execute the instructions on the local chain + T::XcmExecutor::prepare_and_execute( + local_sovereign_account, + internal_instructions.into(), + &mut hash, + weight, + weight, + ) + .ensure_complete() + .map_err(|error| { + log::error!("Failed to execute XCM locally: {:?}", error); + Error::<T>::XcmExecutionFailed + })?; + + Self::deposit_event(Event::XcmTransactedLocally); + + Ok(()) + } + + /// Send XCM instructions to the target parachain. + /// + pub fn transact_in_target_chain( + destination: Location, + target_instructions: staging_xcm::latest::Xcm<()>, + ) -> Result<(), DispatchError> { + #[allow(unused_variables)] + let destination_location = destination.clone(); + + #[cfg(all(not(test), feature = "runtime-benchmarks"))] + let destination_location = Location::new(1, Here); + + // Send to the target chain + send_xcm::<T::XcmSender>(destination_location, target_instructions).map_err( + |error| { + log::error!("Failed to send xcm to {:?} with {:?}", destination, error); + Error::<T>::ErrorSendingXcmToTarget + }, + )?; + + Self::deposit_event(Event::XcmSent { destination }); + + Ok(()) + } + + /// Create and transact instructions. + /// Currently we only support this if the currency is the local chain's. + /// + /// Get the instructions for a Transact XCM. + /// Execute the local transact instructions. + /// Send the target transact instructions. + pub fn transact_xcm( + destination: Location, + asset_location: Location, + fee: u128, + caller: T::AccountId, + transact_encoded_call: Vec<u8>, + transact_encoded_call_weight: Weight, + overall_weight: Weight, + flow: InstructionSequence, + ) -> Result<(), DispatchError> { + let (local_instructions, target_instructions) = Self::get_instruction_set( + destination.clone(), + asset_location, + fee, + caller, + transact_encoded_call, + transact_encoded_call_weight, + overall_weight, + flow, + )?; + + Self::transact_in_local_chain(local_instructions)?; + Self::transact_in_target_chain(destination, target_instructions)?; + + Ok(()) + } + + fn do_pay_xcm_fee( + currency_id: T::CurrencyId, + source: T::AccountId, + dest: T::AccountId, + fee: u128, + ) -> Result<(), DispatchError> { + let free_balance = T::MultiCurrency::free_balance(currency_id.into(), &source); + let min_balance = T::MultiCurrency::minimum_balance(currency_id.into()); + + // Keep-alive check: the payer must retain at least the minimum balance after the fee. + free_balance + .checked_sub(&fee.saturated_into()) + .and_then(|balance_minus_fee| balance_minus_fee.checked_sub(&min_balance)) + .ok_or(DispatchError::Token(BelowMinimum))?; + + T::MultiCurrency::ensure_can_withdraw( + currency_id.into(), + &source, + fee.saturated_into(), + )?; + + T::MultiCurrency::transfer(currency_id.into(), &source, &dest, fee.saturated_into())?; + + Ok(()) + } + + /// Pay for XCMP fees. 
+ /// Transfers the fee from the payer account to the local chain's sovereign account. + /// + pub fn pay_xcm_fee( + currency_id: T::CurrencyId, + source: T::AccountId, + fee: u128, + ) -> Result<(), DispatchError> { + let local_sovereign_account: T::AccountId = + Sibling::from(T::SelfParaId::get()).into_account_truncating(); + match Self::do_pay_xcm_fee( + currency_id, + source.clone(), + local_sovereign_account.clone(), + fee, + ) { + Ok(()) => Self::deposit_event(Event::XcmFeesPaid { + source, + dest: local_sovereign_account, + }), + Err(e) => Self::deposit_event(Event::XcmFeesFailed { + source, + dest: local_sovereign_account, + error: e, + }), + }; + + Ok(()) + } + } +} + +pub trait XcmpTransactor<AccountId, CurrencyId> { + fn transact_xcm( + destination: Location, + asset_location: Location, + fee: u128, + caller: AccountId, + transact_encoded_call: sp_std::vec::Vec<u8>, + transact_encoded_call_weight: Weight, + overall_weight: Weight, + flow: InstructionSequence, + ) -> Result<(), sp_runtime::DispatchError>; + + fn pay_xcm_fee( + currency_id: CurrencyId, + source: AccountId, + fee: u128, + ) -> Result<(), sp_runtime::DispatchError>; +} + +impl<T: Config> XcmpTransactor<T::AccountId, T::CurrencyId> for Pallet<T> { + fn transact_xcm( + destination: Location, + asset_location: Location, + fee: u128, + caller: T::AccountId, + transact_encoded_call: sp_std::vec::Vec<u8>, + transact_encoded_call_weight: Weight, + overall_weight: Weight, + flow: InstructionSequence, + ) -> Result<(), sp_runtime::DispatchError> { + Self::transact_xcm( + destination, + asset_location, + fee, + caller, + transact_encoded_call, + transact_encoded_call_weight, + overall_weight, + flow, + )?; + + Ok(()) + } + + fn pay_xcm_fee( + currency_id: T::CurrencyId, + source: T::AccountId, + fee: u128, + ) -> Result<(), sp_runtime::DispatchError> { + Self::pay_xcm_fee(currency_id, source, fee)?; + + Ok(()) + } +} + +#[derive(Clone, Copy, Debug, Encode, Eq, Decode, PartialEq, TypeInfo)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +pub enum InstructionSequence { + PayThroughSovereignAccount, + PayThroughRemoteDerivativeAccount, +} diff --git a/pallets/xcmp-handler/src/mock.rs b/pallets/xcmp-handler/src/mock.rs new file mode 100644 index 000000000..4aee5e5ca --- /dev/null +++ b/pallets/xcmp-handler/src/mock.rs @@ -0,0 +1,396 @@ +// This file is part of Ava Protocol. + +// Copyright (C) 2022 Ava Protocol +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
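Note on usage: the pallet above deliberately exposes no extrinsics (its `#[pallet::call]` block is empty); runtimes drive it through the `XcmpTransactor` trait. Below is a minimal sketch of such a caller. The destination para id, fee, and weight values are illustrative assumptions, not values taken from this patch, and the calling pallet's own wiring is elided:

```rust
// Hypothetical caller of the xcmp-handler pallet, e.g. an automation
// pallet's task executor. All concrete values here are illustrative.
use pallet_xcmp_handler::{InstructionSequence, XcmpTransactor};
use sp_std::vec::Vec;
use staging_xcm::latest::{prelude::*, Weight};

fn execute_remote_call<T: pallet_xcmp_handler::Config>(
    caller: T::AccountId,
    encoded_call: Vec<u8>, // SCALE-encoded call for the target chain
) -> Result<(), sp_runtime::DispatchError> {
    // Sibling parachain we want to Transact on (assumed id).
    let destination = Location::new(1, Parachain(2000));
    // Pay fees in this chain's native token, identified by its own location.
    let asset_location = T::SelfLocation::get();
    // Weights would normally come from benchmarking the target call.
    let call_weight = Weight::from_parts(100_000_000, 0);
    let overall_weight = call_weight.saturating_add(Weight::from_parts(100_000_000, 0));
    let fee: u128 = 5_000_000_000; // illustrative fee in the native token

    <pallet_xcmp_handler::Pallet<T> as XcmpTransactor<T::AccountId, T::CurrencyId>>::transact_xcm(
        destination,
        asset_location,
        fee,
        caller,
        encoded_call,
        call_weight,
        overall_weight,
        InstructionSequence::PayThroughSovereignAccount,
    )
}
```

`transact_xcm` builds both instruction sets via `get_instruction_set`, executes the local half first, and only then sends the remote half, so a local execution failure aborts the whole operation before anything is sent cross-chain.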
+ +use crate::{self as pallet_xcmp_handler}; +use core::cell::RefCell; +use frame_support::{ + parameter_types, + traits::{ConstU32, Everything, Nothing}, +}; +use frame_system as system; +use pallet_xcm::XcmPassthrough; +use polkadot_parachain_primitives::primitives::Sibling; +use sp_core::H256; +use sp_runtime::{ + traits::{BlakeTwo256, Convert, IdentityLookup}, + AccountId32, BuildStorage, +}; +use staging_xcm::latest::{prelude::*, Weight}; +use staging_xcm_builder::{ + AccountId32Aliases, AllowUnpaidExecutionFrom, EnsureXcmOrigin, FixedWeightBounds, + FrameTransactionalProcessor, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, + SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, + SovereignSignedViaLocation, +}; +use staging_xcm_executor::{ + traits::{TransactAsset, WeightTrader}, + AssetsInHolding, XcmExecutor, +}; + +use orml_traits::parameter_type_with_key; + +use ava_protocol_primitives::AbsoluteAndRelativeReserveProvider; + +type Block = frame_system::mocking::MockBlock<Test>; +pub type AccountId = AccountId32; +pub type Balance = u128; +pub type CurrencyId = u32; + +pub const ALICE: AccountId32 = AccountId32::new([0u8; 32]); +pub const LOCAL_PARA_ID: u32 = 2114; +pub const NATIVE: CurrencyId = 0; + +frame_support::construct_runtime!( + pub enum Test + { + System: system, + Balances: pallet_balances, + ParachainInfo: parachain_info, + XcmpHandler: pallet_xcmp_handler, + XcmPallet: pallet_xcm, + CumulusXcm: cumulus_pallet_xcm, + Currencies: orml_currencies, + Tokens: orml_tokens, + } +); + +parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const SS58Prefix: u8 = 51; +} + +pub type LocalOriginToLocation = SignedToAccountId32<RuntimeOrigin, AccountId, RelayNetwork>; +pub type Barrier = AllowUnpaidExecutionFrom<Everything>; + +impl system::Config for Test { + type BaseCallFilter = Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Nonce = u64; + type Block = Block; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup<Self::AccountId>; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData<Balance>; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = SS58Prefix; + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; + type RuntimeTask = (); + type SingleBlockMigrations = (); + type MultiBlockMigrator = (); + type PreInherents = (); + type PostInherents = (); + type PostTransactions = (); +} + +parameter_types! { + pub const ExistentialDeposit: u64 = 1; + pub const MaxLocks: u32 = 50; + pub const MaxReserves: u32 = 50; +} + +impl pallet_balances::Config for Test { + type MaxLocks = MaxLocks; + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; + type FreezeIdentifier = (); + type MaxFreezes = ConstU32<0>; + type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); + type WeightInfo = (); +} + +impl parachain_info::Config for Test {} + +parameter_type_with_key! 
{ + pub ExistentialDeposits: |_currency_id: CurrencyId| -> Balance { + Default::default() + }; +} + +impl orml_tokens::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Balance = Balance; + type Amount = i64; + type CurrencyId = CurrencyId; + type WeightInfo = (); + type ExistentialDeposits = ExistentialDeposits; + type CurrencyHooks = (); + type MaxLocks = ConstU32<100_000>; + type MaxReserves = ConstU32<100_000>; + type ReserveIdentifier = [u8; 8]; + type DustRemovalWhitelist = frame_support::traits::Nothing; +} + +impl orml_currencies::Config for Test { + type MultiCurrency = Tokens; + type NativeCurrency = AdaptedBasicCurrency; + type GetNativeCurrencyId = GetNativeCurrencyId; + type WeightInfo = (); +} +pub type AdaptedBasicCurrency = orml_currencies::BasicCurrencyAdapter; + +pub struct AccountIdToLocation; +impl Convert for AccountIdToLocation { + fn convert(account: AccountId) -> Location { + Junction::AccountId32 { + network: None, + id: account.into(), + } + .into() + } +} + +thread_local! { + pub static SENT_XCM: RefCell)>> = const { RefCell::new(Vec::new()) }; + pub static TRANSACT_ASSET: RefCell> = const { RefCell::new(Vec::new()) }; +} + +pub(crate) fn sent_xcm() -> Vec<(Location, Xcm<()>)> { + SENT_XCM.with(|q| (*q.borrow()).clone()) +} + +pub(crate) fn transact_asset() -> Vec<(Asset, Location)> { + TRANSACT_ASSET.with(|q| (*q.borrow()).clone()) +} + +pub type LocationToAccountId = ( + ParentIsPreset, + SiblingParachainConvertsVia, + AccountId32Aliases, +); + +pub type XcmOriginToCallOrigin = ( + SovereignSignedViaLocation, + RelayChainAsNative, + SiblingParachainAsNative, + SignedAccountId32AsNative, + XcmPassthrough, +); + +/// Sender that returns error if call equals [9,9,9] +pub struct TestSendXcm; +impl SendXcm for TestSendXcm { + type Ticket = (); + + fn validate( + destination: &mut Option, + message: &mut Option, + ) -> SendResult { + let err_message = Xcm(vec![Transact { + origin_kind: OriginKind::Native, + require_weight_at_most: Weight::from_parts(100_000, 0), + call: vec![9, 1, 1].into(), + }]); + if message.clone().unwrap() == err_message { + Err(SendError::Transport("Destination location full")) + } else { + SENT_XCM.with(|q| { + q.borrow_mut() + .push((((*destination).clone()).unwrap(), message.clone().unwrap())) + }); + Ok(((), Assets::new())) + } + } + + fn deliver(_: Self::Ticket) -> Result { + Ok(XcmHash::default()) + } +} + +// XCMP Mocks +parameter_types! { + pub const UnitWeightCost: u64 = 10; + pub const MaxInstructions: u32 = 100; +} +pub struct DummyWeightTrader; +impl WeightTrader for DummyWeightTrader { + fn new() -> Self { + DummyWeightTrader + } + + fn buy_weight( + &mut self, + _weight: Weight, + _payment: AssetsInHolding, + _context: &XcmContext, + ) -> Result { + Ok(AssetsInHolding::default()) + } +} +pub struct DummyAssetTransactor; +impl TransactAsset for DummyAssetTransactor { + fn deposit_asset(what: &Asset, who: &Location, _context: Option<&XcmContext>) -> XcmResult { + let asset = what.clone(); + TRANSACT_ASSET.with(|q| q.borrow_mut().push((asset, who.clone()))); + Ok(()) + } + + fn withdraw_asset( + what: &Asset, + who: &Location, + _maybe_context: Option<&XcmContext>, + ) -> Result { + let asset = what.clone(); + TRANSACT_ASSET.with(|q| q.borrow_mut().push((asset.clone(), who.clone()))); + Ok(asset.into()) + } +} + +parameter_types! 
{ + pub const RelayNetwork: NetworkId = NetworkId::Polkadot; + pub UniversalLocation: InteriorLocation = + Parachain(2114).into(); + pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); +} +pub struct XcmConfig; +impl staging_xcm_executor::Config for XcmConfig { + type RuntimeCall = RuntimeCall; + type XcmSender = TestSendXcm; + type AssetTransactor = DummyAssetTransactor; + type OriginConverter = XcmOriginToCallOrigin; + type IsReserve = (); + type IsTeleporter = (); + type UniversalLocation = UniversalLocation; + type Barrier = Barrier; + type Weigher = FixedWeightBounds; + type Trader = DummyWeightTrader; + type ResponseHandler = (); + type AssetTrap = XcmPallet; + type AssetClaims = XcmPallet; + type SubscriptionService = XcmPallet; + + type PalletInstancesInfo = AllPalletsWithSystem; + type MaxAssetsIntoHolding = ConstU32<64>; + type AssetLocker = (); + type AssetExchanger = (); + type FeeManager = (); + type MessageExporter = (); + type UniversalAliases = Nothing; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; + type Aliasers = Nothing; + type TransactionalProcessor = FrameTransactionalProcessor; + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); + type XcmRecorder = (); +} + +#[cfg(feature = "runtime-benchmarks")] +parameter_types! { + pub ReachableDest: Option = Some(Parent.into()); +} + +impl pallet_xcm::Config for Test { + type RuntimeEvent = RuntimeEvent; + type SendXcmOrigin = EnsureXcmOrigin; + type XcmRouter = (); + type ExecuteXcmOrigin = EnsureXcmOrigin; + type XcmExecuteFilter = Nothing; + type XcmExecutor = XcmExecutor; + type XcmTeleportFilter = Nothing; + type XcmReserveTransferFilter = Everything; + type Weigher = FixedWeightBounds; + type UniversalLocation = UniversalLocation; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; + type Currency = Balances; + type CurrencyMatcher = (); + type TrustedLockers = (); + type SovereignAccountOf = LocationToAccountId; + type MaxLockers = ConstU32<8>; + type MaxRemoteLockConsumers = ConstU32<0>; + type WeightInfo = pallet_xcm::TestWeightInfo; + type RemoteLockConsumerIdentifier = (); + type AdminOrigin = system::EnsureRoot; +} + +impl cumulus_pallet_xcm::Config for Test { + type RuntimeEvent = RuntimeEvent; + type XcmExecutor = XcmExecutor; +} + +pub struct TokenIdConvert; +impl Convert> for TokenIdConvert { + fn convert(_id: CurrencyId) -> Option { + // Mock implementation with default value + Some(Location { + parents: 1, + interior: Here, + }) + } +} + +parameter_types! 
{ + pub const GetNativeCurrencyId: CurrencyId = NATIVE; + pub Ancestry: Location = Parachain(ParachainInfo::parachain_id().into()).into(); + pub SelfLocation: Location = Location::new(1, Parachain(ParachainInfo::parachain_id().into())); +} + +impl pallet_xcmp_handler::Config for Test { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type CurrencyId = CurrencyId; + type MultiCurrency = Currencies; + type GetNativeCurrencyId = GetNativeCurrencyId; + type SelfParaId = parachain_info::Pallet; + type AccountIdToLocation = AccountIdToLocation; + type CurrencyIdToLocation = TokenIdConvert; + type UniversalLocation = UniversalLocation; + type XcmExecutor = XcmExecutor; + type XcmSender = TestSendXcm; + type Weigher = FixedWeightBounds; + type ReserveProvider = AbsoluteAndRelativeReserveProvider; + type SelfLocation = SelfLocation; +} + +// Build genesis storage according to the mock runtime. +pub fn new_test_ext() -> sp_io::TestExternalities { + let mut storage = frame_system::GenesisConfig::::default() + .build_storage() + .unwrap(); + + parachain_info::GenesisConfig:: { + parachain_id: LOCAL_PARA_ID.into(), + ..Default::default() + } + .assimilate_storage(&mut storage) + .unwrap(); + + let mut ext = sp_io::TestExternalities::new(storage); + ext.execute_with(|| System::set_block_number(1)); + ext +} diff --git a/pallets/xcmp-handler/src/tests.rs b/pallets/xcmp-handler/src/tests.rs new file mode 100644 index 000000000..b141e42e8 --- /dev/null +++ b/pallets/xcmp-handler/src/tests.rs @@ -0,0 +1,402 @@ +// This file is part of Ava Protocol. + +// Copyright (C) 2022 Ava Protocol +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
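One detail worth keeping in mind for the tests below: `do_pay_xcm_fee` refuses any transfer that would drop the payer below the existential deposit, which is exactly what `pay_xcm_fee_keeps_wallet_alive` exercises. A standalone sketch of that keep-alive arithmetic, with numbers borrowed from the tests (`ExistentialDeposit` is 1 in this mock):

```rust
// Keep-alive check mirroring do_pay_xcm_fee: the transfer may proceed only
// when free_balance - fee - min_balance does not underflow.
fn can_pay(free_balance: u128, fee: u128, min_balance: u128) -> bool {
    free_balance
        .checked_sub(fee)
        .and_then(|left| left.checked_sub(min_balance))
        .is_some()
}

fn main() {
    assert!(can_pay(8_000_000, 3_500_000, 1)); // pay_xcm_fee_works: fee is transferred
    assert!(!can_pay(3_500_000, 3_500_000, 1)); // keeps_wallet_alive: refused, balances unchanged
}
```

Note that on refusal `pay_xcm_fee` still returns `Ok(())` and merely emits `XcmFeesFailed`, so a caller that must treat an unpaid fee as fatal has to watch the event, not the return value.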
+use crate::{mock::*, Error, InstructionSequence}; +use frame_support::{assert_noop, assert_ok}; +use frame_system::RawOrigin; +use polkadot_parachain_primitives::primitives::Sibling; +use sp_runtime::traits::{AccountIdConversion, Convert}; +use staging_xcm::latest::{prelude::*, Weight}; + +//***************** +//Extrinsics +//***************** + +const PARA_ID: u32 = 1000; + +//***************** +//Helper functions +//***************** + +// get_instruction_set +#[test] +fn get_instruction_set_local_currency_instructions() { + let destination = Location::new(1, Parachain(PARA_ID)); + let asset_location = Location::new(1, Parachain(PARA_ID)); + + new_test_ext().execute_with(|| { + let transact_encoded_call: Vec = vec![0, 1, 2]; + let transact_encoded_call_weight = Weight::from_parts(100_000_000, 0); + let overall_weight = Weight::from_parts(200_000_000, 0); + let descend_location: Junctions = AccountIdToLocation::convert(ALICE).try_into().unwrap(); + + let expected_instructions = XcmpHandler::get_local_currency_instructions( + destination.clone(), + asset_location.clone(), + descend_location, + transact_encoded_call.clone(), + transact_encoded_call_weight, + overall_weight, + 10, + ) + .unwrap(); + + assert_eq!( + XcmpHandler::get_instruction_set( + destination, + asset_location, + 10, + ALICE, + transact_encoded_call, + transact_encoded_call_weight, + overall_weight, + InstructionSequence::PayThroughSovereignAccount, + ) + .unwrap(), + expected_instructions + ); + }); +} + +// get_local_currency_instructions +// TODO: use xcm_simulator to test these instructions. +#[test] +fn get_local_currency_instructions_works() { + new_test_ext().execute_with(|| { + let destination = Location::new(1, Parachain(PARA_ID)); + let asset_location = Location::new(1, Parachain(PARA_ID)); + let transact_encoded_call: Vec = vec![0, 1, 2]; + let transact_encoded_call_weight = Weight::from_parts(100_000_000, 0); + let xcm_weight = transact_encoded_call_weight + .checked_add(&Weight::from_parts(100_000_000, 0)) + .expect("xcm_weight overflow"); + let xcm_fee = (xcm_weight.ref_time() as u128) * 5_000_000_000; + let descend_location: Junctions = AccountIdToLocation::convert(ALICE).try_into().unwrap(); + + let (local, target) = XcmpHandler::get_local_currency_instructions( + destination, + asset_location, + descend_location, + transact_encoded_call, + transact_encoded_call_weight, + xcm_weight, + xcm_fee, + ) + .unwrap(); + assert_eq!(local.0.len(), 2); + assert_eq!(target.0.len(), 6); + }); +} + +#[test] +fn transact_in_local_chain_works() { + new_test_ext().execute_with(|| { + let destination = Location::new(1, Parachain(PARA_ID)); + let asset_location = destination.clone(); + let transact_encoded_call: Vec = vec![0, 1, 2]; + let transact_encoded_call_weight = Weight::from_parts(100_000_000, 0); + let xcm_weight = transact_encoded_call_weight + .checked_add(&Weight::from_parts(100_000_000, 0)) + .expect("xcm_weight overflow"); + let xcm_fee = (xcm_weight.ref_time() as u128) * 5_000_000_000; + let asset = Asset { + id: asset_location.clone().into(), + fun: Fungible(xcm_fee), + }; + let descend_location: Junctions = AccountIdToLocation::convert(ALICE).try_into().unwrap(); + + let (local_instructions, _) = XcmpHandler::get_local_currency_instructions( + destination.clone(), + asset_location, + descend_location, + transact_encoded_call, + transact_encoded_call_weight, + xcm_weight, + xcm_fee, + ) + .unwrap(); + + assert_ok!(XcmpHandler::transact_in_local_chain(local_instructions)); + assert_eq!( + transact_asset(), + 
vec![ + // Withdrawing asset + ( + asset, + Location { + parents: 1, + interior: Parachain(LOCAL_PARA_ID).into() + } + ), + ] + ); + assert_eq!( + events(), + [RuntimeEvent::XcmpHandler( + crate::Event::XcmTransactedLocally + )] + ); + }); +} + +#[test] +fn transact_in_target_chain_works() { + new_test_ext().execute_with(|| { + let destination = Location::new(1, Parachain(PARA_ID)); + let asset_location = Location { + parents: 1, + interior: Parachain(LOCAL_PARA_ID).into(), + }; + let transact_encoded_call: Vec = vec![0, 1, 2]; + let transact_encoded_call_weight = Weight::from_parts(100_000_000, 0); + let xcm_weight = transact_encoded_call_weight + .checked_add(&Weight::from_parts(100_000_000, 0)) + .expect("xcm_weight overflow"); + let xcm_fee = (xcm_weight.ref_time() as u128) * 5_000_000_000; + let asset = Asset { + id: asset_location.clone().into(), + fun: Fungible(xcm_fee), + }; + let descend_location: Junctions = AccountIdToLocation::convert(ALICE).try_into().unwrap(); + + let (_, target_instructions) = XcmpHandler::get_local_currency_instructions( + destination.clone(), + asset_location, + descend_location, + transact_encoded_call.clone(), + transact_encoded_call_weight, + xcm_weight, + xcm_fee, + ) + .unwrap(); + + assert_ok!(XcmpHandler::transact_in_target_chain( + destination.clone(), + target_instructions + )); + assert_eq!( + sent_xcm(), + vec![( + Location { + parents: 1, + interior: Parachain(PARA_ID).into() + }, + Xcm([ + ReserveAssetDeposited(asset.into()), + BuyExecution { + fees: Asset { + id: Location { + parents: 1, + interior: Parachain(LOCAL_PARA_ID).into() + } + .into(), + fun: Fungible(xcm_fee), + }, + weight_limit: Limited(xcm_weight), + }, + DescendOrigin( + AccountId32 { + network: None, + id: ALICE.into() + } + .into() + ), + Transact { + origin_kind: OriginKind::SovereignAccount, + require_weight_at_most: transact_encoded_call_weight, + call: transact_encoded_call.into(), + }, + RefundSurplus, + DepositAsset { + assets: Wild(AllCounted(1)), + beneficiary: Location { + parents: 1, + interior: Parachain(LOCAL_PARA_ID).into(), + }, + }, + ] + .to_vec()), + )] + ); + assert_eq!( + events(), + [RuntimeEvent::XcmpHandler(crate::Event::XcmSent { + destination + })] + ); + }); +} + +#[test] +fn transact_in_target_chain_with_to_reserved_currency_works() { + new_test_ext().execute_with(|| { + let destination = Location::new(1, Parachain(PARA_ID)); + let asset_location = Location { + parents: 1, + interior: Parachain(PARA_ID).into(), + }; + let transact_encoded_call: Vec = vec![0, 1, 2]; + let transact_encoded_call_weight = Weight::from_parts(100_000_000, 0); + let xcm_weight = transact_encoded_call_weight + .checked_add(&Weight::from_parts(100_000_000, 0)) + .expect("xcm_weight overflow"); + let xcm_fee = (xcm_weight.ref_time() as u128) * 5_000_000_000; + let target_asset = Asset { + id: Location { + parents: 0, + interior: Here, + } + .into(), + fun: Fungible(xcm_fee), + }; + let descend_location: Junctions = AccountIdToLocation::convert(ALICE).try_into().unwrap(); + + let (_, target_instructions) = XcmpHandler::get_local_currency_instructions( + destination.clone(), + asset_location, + descend_location, + transact_encoded_call.clone(), + transact_encoded_call_weight, + xcm_weight, + xcm_fee, + ) + .unwrap(); + + assert_ok!(XcmpHandler::transact_in_target_chain( + destination.clone(), + target_instructions + )); + assert_eq!( + sent_xcm(), + vec![( + Location { + parents: 1, + interior: Parachain(PARA_ID).into() + }, + Xcm([ + WithdrawAsset(target_asset.clone().into()), 
+ BuyExecution { + fees: target_asset, + weight_limit: Limited(xcm_weight) + }, + DescendOrigin( + AccountId32 { + network: None, + id: ALICE.into() + } + .into() + ), + Transact { + origin_kind: OriginKind::SovereignAccount, + require_weight_at_most: transact_encoded_call_weight, + call: transact_encoded_call.into(), + }, + RefundSurplus, + DepositAsset { + assets: Wild(AllCounted(1)), + beneficiary: Location { + parents: 1, + interior: Parachain(LOCAL_PARA_ID).into(), + }, + }, + ] + .to_vec()), + )] + ); + assert_eq!( + events(), + [RuntimeEvent::XcmpHandler(crate::Event::XcmSent { + destination + })] + ); + }); +} + +#[test] +fn transact_in_target_chain_with_non_reserved_currency_will_throw_unsupported_fee_payment_error() { + new_test_ext().execute_with(|| { + let destination = Location::new(1, Parachain(PARA_ID)); + let asset_location = Location { + parents: 1, + interior: Parachain(3000).into(), + }; + let transact_encoded_call: Vec = vec![0, 1, 2]; + let transact_encoded_call_weight = Weight::from_parts(100_000_000, 0); + let xcm_weight = transact_encoded_call_weight + .checked_add(&Weight::from_parts(100_000_000, 0)) + .expect("xcm_weight overflow"); + let xcm_fee = (xcm_weight.ref_time() as u128) * 5_000_000_000; + let descend_location: Junctions = AccountIdToLocation::convert(ALICE).try_into().unwrap(); + + assert_noop!( + XcmpHandler::get_local_currency_instructions( + destination, + asset_location, + descend_location, + transact_encoded_call, + transact_encoded_call_weight, + xcm_weight, + xcm_fee, + ), + Error::::UnsupportedFeePayment, + ); + }); +} + +#[test] +fn pay_xcm_fee_works() { + new_test_ext().execute_with(|| { + let local_sovereign_account: AccountId = + Sibling::from(LOCAL_PARA_ID).into_account_truncating(); + let fee = 3_500_000; + let alice_balance = 8_000_000; + let currency_id = 0; + + Balances::force_set_balance(RawOrigin::Root.into(), ALICE, alice_balance).unwrap(); + + assert_ok!(XcmpHandler::pay_xcm_fee(currency_id, ALICE, fee)); + assert_eq!(Balances::free_balance(ALICE), alice_balance - fee); + assert_eq!(Balances::free_balance(local_sovereign_account), fee); + }); +} + +#[test] +fn pay_xcm_fee_keeps_wallet_alive() { + new_test_ext().execute_with(|| { + let local_sovereign_account: AccountId = + Sibling::from(LOCAL_PARA_ID).into_account_truncating(); + let fee = 3_500_000; + let alice_balance = fee; + let currency_id = 0; + + Balances::force_set_balance(RawOrigin::Root.into(), ALICE, alice_balance).unwrap(); + + assert_ok!(XcmpHandler::pay_xcm_fee(currency_id, ALICE, fee)); + assert_eq!(Balances::free_balance(ALICE), alice_balance); + assert_eq!(Balances::free_balance(local_sovereign_account), 0); + }); +} + +fn events() -> Vec { + let evt = System::events() + .into_iter() + .map(|evt| evt.event) + .collect::>(); + + System::reset_events(); + + evt +} diff --git a/test/moonwall.config.json b/test/moonwall.config.json index c7b295b31..dfd9ab6ee 100644 --- a/test/moonwall.config.json +++ b/test/moonwall.config.json @@ -533,7 +533,7 @@ "download-latest-rt-binaries.sh", "build-spec-single-container.sh tmp container-chain-simple-node", "download-polkadot.sh", - "compile-wasm.ts compile -b tmp/tanssi-node -o wasm -c specs/single-container-tanssi-1000.json", + "compile-wasm.ts compile -b ../target/release/tanssi-node -o wasm -c specs/single-container-tanssi-1000.json", "compile-wasm.ts compile -b tmp/container-chain-simple-node -o wasm -c specs/single-container-template-container-2000.json" ], diff --git a/test/scripts/build-spec-single-container.sh 
b/test/scripts/build-spec-single-container.sh index 3fe4d3486..8eef1e822 100755 --- a/test/scripts/build-spec-single-container.sh +++ b/test/scripts/build-spec-single-container.sh @@ -20,4 +20,4 @@ fi mkdir -p specs $BINARY_FOLDER/$CONTAINER_BINARY build-spec --disable-default-bootnode --add-bootnode "/ip4/127.0.0.1/tcp/33049/ws/p2p/12D3KooWHVMhQDHBpj9vQmssgyfspYecgV6e3hH1dQVDUkUbCYC9" --parachain-id 2000 --raw > specs/single-container-template-container-2000.json -$BINARY_FOLDER/tanssi-node build-spec --chain dancebox-local --parachain-id 1000 --add-container-chain specs/single-container-template-container-2000.json --invulnerable "Collator1000-01" --invulnerable "Collator1000-02" --invulnerable "Collator1000-03" --invulnerable "Collator2000-01" --invulnerable "Collator2000-02" --raw > specs/single-container-tanssi-1000.json +../target/release/tanssi-node build-spec --chain dancebox-local --parachain-id 1000 --add-container-chain specs/single-container-template-container-2000.json --invulnerable "Collator1000-01" --invulnerable "Collator1000-02" --invulnerable "Collator1000-03" --invulnerable "Collator2000-01" --invulnerable "Collator2000-02" --raw > specs/single-container-tanssi-1000.json diff --git a/test/scripts/download-polkadot.sh b/test/scripts/download-polkadot.sh index 3d0389255..c7f8931b3 100755 --- a/test/scripts/download-polkadot.sh +++ b/test/scripts/download-polkadot.sh @@ -7,7 +7,9 @@ set -e cd $(dirname $0)/.. # Grab Polkadot version -branch=$(egrep -o '/polkadot.*#([^\"]*)' ../Cargo.lock | head -1 | sed 's/.*release-//#') +# branch=$(egrep -o '/polkadot.*#([^\"]*)' ../Cargo.lock | head -1 | sed 's/.*release-//#') +# This line fixes a sed error on macOS +branch=$(egrep -o '/polkadot.*#([^\"]*)' ../Cargo.lock | head -1 | sed 's|.*release-|//#|') polkadot_release=$(echo $branch | sed 's/#.*//' | sed 's/\/polkadot-sdk?branch=tanssi-polkadot-v//') # There is a bug where moonwall saves a html file as an executable, and we try to execute that html file. 
diff --git a/test/suites/parathreads/test_tanssi_parathreads.ts b/test/suites/parathreads/test_tanssi_parathreads.ts index bc5b4ea8e..1d78bca60 100644 --- a/test/suites/parathreads/test_tanssi_parathreads.ts +++ b/test/suites/parathreads/test_tanssi_parathreads.ts @@ -1,14 +1,12 @@ import { beforeAll, describeSuite, expect } from "@moonwall/cli"; -import { MIN_GAS_PRICE, customWeb3Request, generateKeyringPair, getBlockArray } from "@moonwall/util"; +import { getBlockArray } from "@moonwall/util"; import { ApiPromise, Keyring } from "@polkadot/api"; -import { Signer } from "ethers"; import fs from "fs/promises"; import { getAuthorFromDigest } from "../../util/author"; import { signAndSendAndInclude, waitSessions } from "../../util/block"; import { getHeaderFromRelay } from "../../util/relayInterface"; import { chainSpecToContainerChainGenesisData } from "../../util/genesis_data.ts"; import jsonBg from "json-bigint"; -import { createTransfer, waitUntilEthTxIncluded } from "../../util/ethereum.ts"; import { getKeyringNimbusIdHex } from "../../util/keys.ts"; import Bottleneck from "bottleneck"; import { stringToHex } from "@polkadot/util"; @@ -23,7 +21,6 @@ describeSuite({ let relayApi: ApiPromise; let container2000Api: ApiPromise; let container2001Api: ApiPromise; - let ethersSigner: Signer; let allCollators: string[]; let collatorName: Record; @@ -32,7 +29,6 @@ describeSuite({ relayApi = context.polkadotJs("Relay"); container2000Api = context.polkadotJs("Container2000"); container2001Api = context.polkadotJs("Container2001"); - ethersSigner = context.ethers(); const relayNetwork = relayApi.consts.system.version.specName.toString(); expect(relayNetwork, "Relay API incorrect").to.contain("rococo"); @@ -211,21 +207,21 @@ describeSuite({ slotFrequency2000, nextProfileId ); - const slotFrequency2001 = paraApi.createType("TpTraitsSlotFrequency", { - min: 5, - max: 5, - }); - const responseFor2001 = await createTxBatchForCreatingParathread( - paraApi, - alice.address, - 2001, - slotFrequency2001, - responseFor2000.nextProfileId - ); + // const slotFrequency2001 = paraApi.createType("TpTraitsSlotFrequency", { + // min: 5, + // max: 5, + // }); + // const responseFor2001 = await createTxBatchForCreatingParathread( + // paraApi, + // alice.address, + // 2001, + // slotFrequency2001, + // responseFor2000.nextProfileId + // ); // Cram everything in one array const txs = responseFor2000.txs; - txs.push(...responseFor2001.txs); + // txs.push(...responseFor2001.txs); await signAndSendAndInclude(paraApi.tx.sudo.sudo(paraApi.tx.utility.batch(txs)), alice); const pendingParas = await paraApi.query.registrar.pendingParaIds(); @@ -236,17 +232,17 @@ describeSuite({ // These will be the paras in session 2 // TODO: fix once we have types - expect(parasScheduled.toJSON()).to.deep.equal([2000, 2001]); + expect(parasScheduled.toJSON()).to.deep.equal([2000]); // Check the para id has been given some free credits expect( (await paraApi.query.servicesPayment.blockProductionCredits(2000)).toJSON(), "Container chain 2000 should have been given credits" ).toBeGreaterThan(0); - expect( - (await paraApi.query.servicesPayment.blockProductionCredits(2001)).toJSON(), - "Container chain 2001 should have been given credits" - ).toBeGreaterThan(0); + // expect( + // (await paraApi.query.servicesPayment.blockProductionCredits(2001)).toJSON(), + // "Container chain 2001 should have been given credits" + // ).toBeGreaterThan(0); // Checking that in session 2 paras are registered await waitSessions(context, paraApi, 2); @@ -254,14 
+250,15 @@ describeSuite({ // Expect now paraIds to be registered const parasRegistered = await paraApi.query.registrar.registeredParaIds(); // TODO: fix once we have types - expect(parasRegistered.toJSON()).to.deep.equal([2000, 2001]); + // expect(parasRegistered.toJSON()).to.deep.equal([2000, 2001]); + expect(parasRegistered.toJSON()).to.deep.equal([2000]); // Check that collators have been assigned const collators = await paraApi.query.collatorAssignment.collatorContainerChain(); console.log(collators.toJSON()); expect(collators.toJSON().containerChains[2000].length).to.be.greaterThan(0); - expect(collators.toJSON().containerChains[2001].length).to.be.greaterThan(0); + // expect(collators.toJSON().containerChains[2001].length).to.be.greaterThan(0); }, }); @@ -277,19 +274,19 @@ describeSuite({ }, }); - it({ - id: "T05", - title: "Blocks are being produced on container 2001", - test: async function () { - // Produces 1 block every 5 slots, which is every 30 seconds - // Give it a bit more time just in case - await sleep(120000); - const blockNum = (await container2001Api.rpc.chain.getBlock()).block.header.number.toNumber(); + // it({ + // id: "T05", + // title: "Blocks are being produced on container 2001", + // test: async function () { + // // Produces 1 block every 5 slots, which is every 30 seconds + // // Give it a bit more time just in case + // await sleep(120000); + // const blockNum = (await container2001Api.rpc.chain.getBlock()).block.header.number.toNumber(); - expect(blockNum).to.be.greaterThan(0); - expect(await ethersSigner.provider.getBlockNumber(), "Safe tag is not present").to.be.greaterThan(0); - }, - }); + // expect(blockNum).to.be.greaterThan(0); + // expect(await ethersSigner.provider.getBlockNumber(), "Safe tag is not present").to.be.greaterThan(0); + // }, + // }); it({ id: "T06", @@ -308,21 +305,21 @@ describeSuite({ }, }); - it({ - id: "T07", - title: "Test container chain 2001 assignation is correct", - test: async function () { - const currentSession = (await paraApi.query.session.currentIndex()).toNumber(); - const paraId = (await container2001Api.query.parachainInfo.parachainId()).toString(); - const containerChainCollators = ( - await paraApi.query.authorityAssignment.collatorContainerChain(currentSession) - ).toJSON().containerChains[paraId]; + // it({ + // id: "T07", + // title: "Test container chain 2001 assignation is correct", + // test: async function () { + // const currentSession = (await paraApi.query.session.currentIndex()).toNumber(); + // const paraId = (await container2001Api.query.parachainInfo.parachainId()).toString(); + // const containerChainCollators = ( + // await paraApi.query.authorityAssignment.collatorContainerChain(currentSession) + // ).toJSON().containerChains[paraId]; - const writtenCollators = (await container2001Api.query.authoritiesNoting.authorities()).toJSON(); + // const writtenCollators = (await container2001Api.query.authoritiesNoting.authorities()).toJSON(); - expect(containerChainCollators).to.deep.equal(writtenCollators); - }, - }); + // expect(containerChainCollators).to.deep.equal(writtenCollators); + // }, + // }); it({ id: "T08", @@ -331,18 +328,17 @@ describeSuite({ test: async function () { const assignment = await paraApi.query.collatorAssignment.collatorContainerChain(); const paraId2000 = await container2000Api.query.parachainInfo.parachainId(); - const paraId2001 = await container2001Api.query.parachainInfo.parachainId(); // TODO: fix once we have types const containerChainCollators2000 = 
assignment.containerChains.toJSON()[paraId2000.toString()]; - const containerChainCollators2001 = assignment.containerChains.toJSON()[paraId2001.toString()]; + // const containerChainCollators2001 = assignment.containerChains.toJSON()[paraId2001.toString()]; await context.waitBlock(3, "Tanssi"); const author2000 = await paraApi.query.authorNoting.latestAuthor(paraId2000); - const author2001 = await paraApi.query.authorNoting.latestAuthor(paraId2001); + // const author2001 = await paraApi.query.authorNoting.latestAuthor(paraId2001); expect(containerChainCollators2000.includes(author2000.toJSON().author)).to.be.true; - expect(containerChainCollators2001.includes(author2001.toJSON().author)).to.be.true; + // expect(containerChainCollators2001.includes(author2001.toJSON().author)).to.be.true; }, }); @@ -358,35 +354,35 @@ describeSuite({ }, }); - it({ - id: "T10", - title: "Test frontier template isEthereum", - test: async function () { - // TODO: fix once we have types - const genesisData2000 = await paraApi.query.registrar.paraGenesisData(2000); - expect(genesisData2000.toJSON().properties.isEthereum).to.be.false; - const genesisData2001 = await paraApi.query.registrar.paraGenesisData(2001); - expect(genesisData2001.toJSON().properties.isEthereum).to.be.true; - }, - }); - it({ - id: "T11", - title: "Transactions can be made with ethers", - timeout: 120000, - test: async function () { - const randomAccount = generateKeyringPair(); - const tx = await createTransfer(context, randomAccount.address, 1_000_000_000_000, { - gasPrice: MIN_GAS_PRICE, - }); - const txHash = await customWeb3Request(context.web3(), "eth_sendRawTransaction", [tx]); - await waitUntilEthTxIncluded( - () => context.waitBlock(1, "Container2001"), - context.web3(), - txHash.result - ); - expect(Number(await context.web3().eth.getBalance(randomAccount.address))).to.be.greaterThan(0); - }, - }); + // it({ + // id: "T10", + // title: "Test frontier template isEthereum", + // test: async function () { + // // TODO: fix once we have types + // const genesisData2000 = await paraApi.query.registrar.paraGenesisData(2000); + // expect(genesisData2000.toJSON().properties.isEthereum).to.be.false; + // const genesisData2001 = await paraApi.query.registrar.paraGenesisData(2001); + // expect(genesisData2001.toJSON().properties.isEthereum).to.be.true; + // }, + // }); + // it({ + // id: "T11", + // title: "Transactions can be made with ethers", + // timeout: 120000, + // test: async function () { + // const randomAccount = generateKeyringPair(); + // const tx = await createTransfer(context, randomAccount.address, 1_000_000_000_000, { + // gasPrice: MIN_GAS_PRICE, + // }); + // const txHash = await customWeb3Request(context.web3(), "eth_sendRawTransaction", [tx]); + // await waitUntilEthTxIncluded( + // () => context.waitBlock(1, "Container2001"), + // context.web3(), + // txHash.result + // ); + // expect(Number(await context.web3().eth.getBalance(randomAccount.address))).to.be.greaterThan(0); + // }, + // }); it({ id: "T12", title: "Check block frequency of parathreads", @@ -398,7 +394,7 @@ describeSuite({ // TODO: calculate block frequency somehow assertSlotFrequency(await getBlockData(paraApi), 1); assertSlotFrequency(await getBlockData(container2000Api), 10); - assertSlotFrequency(await getBlockData(container2001Api), 10); + // assertSlotFrequency(await getBlockData(container2001Api), 10); }, }); },
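Taken together with the Rust tests earlier, the fee-payment behavior comes down to where `T::ReserveProvider` says the fee asset's reserve is. A condensed sketch of the branching in `get_local_currency_instructions` (shape only; the real code builds the full XCM programs shown in the tests):

```rust
use staging_xcm::latest::prelude::*;

// Condensed decision logic from get_local_currency_instructions.
// `reserve` comes from T::ReserveProvider::reserve(&fee_asset).
fn fee_flow(reserve: &Location, destination: &Location) -> Result<&'static str, &'static str> {
    if *reserve == Location::here() {
        // We are the reserve: withdraw + deposit locally, then
        // ReserveAssetDeposited / BuyExecution / Transact on the target.
        Ok("reserve-is-self")
    } else if reserve == destination {
        // The target is the reserve: withdraw + burn locally, then
        // WithdrawAsset / BuyExecution / Transact on the target.
        Ok("reserve-is-destination")
    } else {
        // A third-chain reserve is rejected with UnsupportedFeePayment.
        Err("UnsupportedFeePayment")
    }
}
```

This is why `transact_in_target_chain_with_non_reserved_currency_will_throw_unsupported_fee_payment_error` uses para 3000 as the asset location while the destination is para 1000.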